| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
''' CacheFeederAgent
This agent feeds the Cache tables with the outputs of the cache commands.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN CacheFeederAgent
:end-before: ##END
:dedent: 2
:caption: CacheFeederAgent options
'''
__RCSID__ = '$Id$'
from DIRAC import S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.ResourceStatusSystem.Command import CommandCaller
AGENT_NAME = 'ResourceStatus/CacheFeederAgent'
class CacheFeederAgent(AgentModule):
'''
  The CacheFeederAgent feeds the cache tables for the client and the accounting.
  It periodically runs a set of commands and stores their results in the
  tables.
'''
def __init__(self, *args, **kwargs):
AgentModule.__init__(self, *args, **kwargs)
self.commands = {}
self.clients = {}
self.cCaller = None
self.rmClient = None
def initialize(self):
""" Define the commands to be executed, and instantiate the clients that will be used.
"""
self.am_setOption('shifterProxy', 'DataManager')
res = ObjectLoader().loadObject('DIRAC.ResourceStatusSystem.Client.ResourceStatusClient',
'ResourceStatusClient')
if not res['OK']:
self.log.error('Failed to load ResourceStatusClient class: %s' % res['Message'])
return res
rsClass = res['Value']
res = ObjectLoader().loadObject('DIRAC.ResourceStatusSystem.Client.ResourceManagementClient',
'ResourceManagementClient')
if not res['OK']:
self.log.error('Failed to load ResourceManagementClient class: %s' % res['Message'])
return res
rmClass = res['Value']
self.rmClient = rmClass()
self.commands['Downtime'] = [{'Downtime': {}}]
self.commands['GOCDBSync'] = [{'GOCDBSync': {}}]
self.commands['FreeDiskSpace'] = [{'FreeDiskSpace': {}}]
# PilotsCommand
# self.commands[ 'Pilots' ] = [
# { 'PilotsWMS' : { 'element' : 'Site', 'siteName' : None } },
# { 'PilotsWMS' : { 'element' : 'Resource', 'siteName' : None } }
# ]
# FIXME: do not forget about hourly vs Always ...etc
# AccountingCacheCommand
# self.commands[ 'AccountingCache' ] = [
# {'SuccessfullJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# {'FailedJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# {'SuccessfullPilotsBySiteSplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'FailedPilotsBySiteSplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'SuccessfullPilotsByCESplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'FailedPilotsByCESplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'RunningJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :168, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :720, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :8760, 'plotType' :'Job' }},
# ]
# VOBOXAvailability
# self.commands[ 'VOBOXAvailability' ] = [
# { 'VOBOXAvailability' : {} }
    # ]
# Reuse clients for the commands
self.clients['GOCDBClient'] = GOCDBClient()
self.clients['ReportGenerator'] = RPCClient('Accounting/ReportGenerator')
self.clients['ReportsClient'] = ReportsClient()
self.clients['ResourceStatusClient'] = rsClass()
self.clients['ResourceManagementClient'] = rmClass()
self.clients['WMSAdministrator'] = RPCClient('WorkloadManagement/WMSAdministrator')
self.cCaller = CommandCaller
return S_OK()
def loadCommand(self, commandModule, commandDict):
""" Loads and executes commands.
:param commandModule: Name of the command (e.g. 'Downtime')
:type commandModule: basestring
:param commandDict: dictionary of {'CommandClass':{arguments}}
:type commandDict: dict
"""
commandName = commandDict.keys()[0]
commandArgs = commandDict[commandName]
commandTuple = ('%sCommand' % commandModule, '%sCommand' % commandName)
commandObject = self.cCaller.commandInvocation(commandTuple, pArgs=commandArgs,
clients=self.clients)
if not commandObject['OK']:
self.log.error('Error initializing %s' % commandName)
return commandObject
commandObject = commandObject['Value']
# Set master mode
commandObject.masterMode = True
self.log.info('%s/%s' % (commandModule, commandName))
return S_OK(commandObject)
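
  # Illustrative note (not part of the original agent): given the entry
  # self.commands['Downtime'] = [{'Downtime': {}}] defined in initialize(),
  # loadCommand('Downtime', {'Downtime': {}}) resolves the tuple
  # ('DowntimeCommand', 'DowntimeCommand'), instantiates it through
  # CommandCaller.commandInvocation with the shared clients, switches it to
  # master mode and returns it wrapped in S_OK.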
def execute(self):
""" Just executes, via `loadCommand`, the commands in self.commands one after the other
"""
for commandModule, commandList in self.commands.iteritems():
self.log.info('%s module initialization' % commandModule)
for commandDict in commandList:
commandObject = self.loadCommand(commandModule, commandDict)
if not commandObject['OK']:
self.log.error(commandObject['Message'])
continue
commandObject = commandObject['Value']
try:
results = commandObject.doCommand()
if not results['OK']:
self.log.error('Failed to execute command', '%s: %s' % (commandModule, results['Message']))
continue
results = results['Value']
if not results:
self.log.info('Empty results')
continue
self.log.verbose('Command OK Results')
self.log.verbose(results)
except Exception as excp: # pylint: disable=broad-except
self.log.exception("Failed to execute command, with exception: %s" % commandModule, lException=excp)
return S_OK()
| petricm/DIRAC | ResourceStatusSystem/Agent/CacheFeederAgent.py | Python | gpl-3.0 | 6,425 | ["DIRAC"] | ba00d2814ee72009da4ee62625e8f1fba6e68667041aca102460b5010c7b7e48 |
'''
Check the speed of different Brian 2 configurations (with additional models for brian2cuda)
'''
import time  # used by TimedSpeedTest.timed_run below

import numpy as np  # np.log10 is used for the n_power lists below

import brian2
from brian2 import *
from brian2.tests.features import SpeedTest
from brian2.tests.features.speed import *
from brian2cuda.tests.features.cuda_configuration import insert_benchmark_point
from brian2.tests.features.speed import __all__
__all__.extend(['DenseMediumRateSynapsesOnlyHeterogeneousDelays',
'SparseLowRateSynapsesOnlyHeterogeneousDelays',
'COBAHHUncoupled',
'COBAHHCoupled',
'COBAHHPseudocoupled1000',
'COBAHHPseudocoupled80',
'BrunelHakimHomogDelays',
'BrunelHakimHeterogDelays',
'BrunelHakimHeterogDelaysNarrowDistr',
'CUBAFixedConnectivityNoMonitor',
'STDPCUDA',
'STDPCUDAHomogeneousDelays',
'STDPCUDAHeterogeneousDelays',
'STDPCUDAHeterogeneousDelaysNarrowDistr',
'STDPCUDARandomConnectivityHomogeneousDelays',
'STDPCUDARandomConnectivityHeterogeneousDelays',
'STDPCUDARandomConnectivityHeterogeneousDelaysNarrowDistr',
'STDPCUDANoPostEffects',
'STDPEventDriven',
'MushroomBody'
])
# Add a time measurement of the `brian2.run()` call in the `timed_run()` calls
class TimedSpeedTest(SpeedTest):
def __init__(self, n):
self.runtime = None
super().__init__(n)
def timed_run(self, duration):
start = time.time()
# Can't use `super().timed_run()` since the `level` argument would be too low
brian2.run(duration, level=1)
self.runtime = time.time() - start
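
# Illustrative usage sketch (not part of the original benchmark suite): run a
# single configuration directly and read the measured wall-clock runtime.
#
#   test = CUBAFixedConnectivityNoMonitor(10000)  # n = 10000 neurons
#   test.run()
#   print('brian2.run() took %.2f s' % test.runtime)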
class COBAHHBase(TimedSpeedTest):
"""Base class for COBAHH benchmarks with different connectivity"""
category = "Full examples"
n_label = 'Num neurons'
# configuration options
duration = 10*second
uncoupled = False
# if not uncoupled, these need to be set in child class
we = None
wi = None
p = lambda self, n: None
def run(self):
# preference for memory saving
prefs['devices.cuda_standalone.no_pre_references'] = True
# Parameters
area = 20000*umetre**2
Cm = (1*ufarad*cm**-2) * area
gl = (5e-5*siemens*cm**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cm**-2) * area
g_kd = (30*msiemens*cm**-2) * area
VT = -63*mV
# Time constants
taue = 5*ms
taui = 10*ms
# Reversal potentials
Ee = 0*mV
Ei = -80*mV
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
dge/dt = -ge*(1./taue) : siemens
dgi/dt = -gi*(1./taui) : siemens
alpha_m = 0.32*(mV**-1)*(13.*mV-v+VT)/
(exp((13.*mV-v+VT)/(4.*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1.)*(v-VT-40.*mV)/
(exp((v-VT-40.*mV)/(5.*mV))-1.)/ms : Hz
alpha_h = 0.128*exp((17.*mV-v+VT)/(18.*mV))/ms : Hz
beta_h = 4./(1.+exp((40.*mV-v+VT)/(5.*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1.)*(15.*mV-v+VT)/
(exp((15.*mV-v+VT)/(5.*mV))-1.)/ms : Hz
beta_n = .5*exp((10.*mV-v+VT)/(40.*mV))/ms : Hz
''')
P = NeuronGroup(self.n, model=eqs, threshold='v>-20*mV', refractory=3*ms,
method='exponential_euler')
if not self.uncoupled:
logger.info(
f"Connecting synapses with\n"
f"\tprobability p(N={self.n}) = {self.p(self.n)}\n"
f"\twe={self.we}\n"
f"\twi={self.wi}"
)
num_exc = int(0.8 * self.n)
Pe = P[:num_exc]
Pi = P[num_exc:]
Ce = Synapses(Pe, P, 'we : siemens (constant)', on_pre='ge+=we', delay=0*ms)
Ci = Synapses(Pi, P, 'wi : siemens (constant)', on_pre='gi+=wi', delay=0*ms)
# connection probability p can depend on network size n
insert_benchmark_point("before_synapses_connect")
Ce.connect(p=self.p(self.n))
Ci.connect(p=self.p(self.n))
insert_benchmark_point("after_synapses_connect")
Ce.we = self.we # excitatory synaptic weight
Ci.wi = self.wi # inhibitory synaptic weight
else:
logger.info("Simulating in uncoupled mode. No synapses.")
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
P.ge = '(randn() * 1.5 + 4) * 10.*nS'
P.gi = '(randn() * 12 + 20) * 10.*nS'
self.timed_run(self.duration)
class COBAHHUncoupled(COBAHHBase):
"""COBAHH from brian2 examples but without synapses and without monitors"""
name = "COBAHH uncoupled (no synapses, no monitors)"
# TITAN X
#n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8] #fail: 131250000
# A100 40GB
n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5]#, np.log10(475000000)] #fail: 475000000 (~10**8.68)
n_range = [int(10**p) for p in n_power]
uncoupled = True
class COBAHHCoupled(COBAHHBase):
"""COBAHH from brian2 examples without monitors"""
name = "COBAHH (brian2 example, 2% coupling probabiliy, no monitors)"
n_range = [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000, 3781250] #pass:3781250, fail: 3812500
p = lambda self, n: 0.02 # connection probability
we = 6 * nS # excitatory synaptic weight
wi = 67 * nS # inhibitory synaptic weight
class COBAHHPseudocoupled1000(COBAHHBase):
"""
    COBAHH with 1000 synapses per neuron and all weights set to very small
    values, s.t. they effectively have no effect while compiler optimisations
    are avoided (for better comparability with the coupled case). This
    benchmark is used in the Brian2GeNN paper. No monitors.
"""
name = "COBAHH (1000 syn/neuron, weights zero, no monitors)"
#n_range = [100, 500, 1000, 5000, 10000, 20000, 40000, 80000, 150000, 300000] #pass: 384962, fail: 390235
# TITAN X
#n_power = [2, 2.33, 2.66, 3, 3.33, 3.66, 4, 4.33, 4.66, 5, 5.33, log10(384962)] #pass: 384962, fail: 390235
# A100 40GB
n_power = [2, 2.33, 2.66, 3, 3.33, 3.66, 4, 4.33, 4.66, 5, 5.33, 5.66, 6] #pass: 1154883 (~10**6.06), fail: ?
n_range = [int(10**p) for p in n_power]
    # fixed connectivity: 1000 synapses per neuron
p = lambda self, n: 1000. / n
# weights set to tiny values, s.t. they are effectively zero but don't
# result in compiler optimisations
we = wi = 'rand() * 1e-9*nS'
class COBAHHPseudocoupledZeroWeights1000(COBAHHPseudocoupled1000):
"""
COBAHH with 1000 synapses per neuron and all weights set to zero and
without monitors.
"""
name = "COBAHH (1000 syn/neuron, weights zero, no monitors)"
we = wi = 0
class COBAHHPseudocoupled80(COBAHHBase):
"""
    COBAHH with 80 synapses per neuron and all weights set to very small
    values, s.t. they effectively have no effect while compiler optimisations
    are avoided (for better comparability with the coupled case). The variant
    of this benchmark with 1000 synapses per neuron is used in the Brian2GeNN
    paper. No monitors.
"""
name = "COBAHH (80 syn/neuron, weights zero, no monitors)"
#n_range = [100, 500, 1000, 5000, 10000, 20000, 40000, 80000, 150000, 300000, 900000, 3500000]
# TITAN X
#n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5] #pass: 3625000, fail: 3632813
# A100 40GB
n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7] #pass: 9486831 (~10**6.98), fail: ?
n_range = [int(10**p) for p in n_power]
    # fixed connectivity: 80 synapses per neuron
p = lambda self, n: 80. / n
# weights set to tiny values, s.t. they are effectively zero but don't
# result in compiler optimisations
we = wi = 'rand() * 1e-9*nS'
class COBAHHPseudocoupledZeroWeights80(COBAHHPseudocoupled80):
"""
COBAHH with 80 synapses per neuron and all weights set to zero and without
monitors.
"""
name = "COBAHH (80 syn/neuron, weights zero, no monitors)"
we = wi = 0
class BrunelHakimBase(TimedSpeedTest):
"""
Base class for BrunelHakim benchmarks with different delay
distributions
"""
category = "Full examples"
n_label = 'Num neurons'
# configuration options
duration = 10*second
# need to be set in child class
    sigmaext = None  # volt
muext = None # volt
# homogeneous delays
homog_delay = None # second
# heterogeneous delays
heterog_delay = None # string syntax
def run(self):
# preference for memory saving
prefs['devices.cuda_standalone.no_pre_references'] = True
assert not (self.heterog_delay is not None and
self.homog_delay is not None), \
"Can't set homog_delay and heterog_delay"
Vr = 10*mV
theta = 20*mV
tau = 20*ms
delta = 2*ms
taurefr = 2*ms
C = 1000
sparseness = float(C)/self.n
J = .1*mV
muext = self.muext
sigmaext = self.sigmaext
logger.info(f"Simulating Brunel Hakim with muext={muext}, sigmaext={sigmaext}")
eqs = """
dV/dt = (-V+muext + sigmaext * sqrt(tau) * xi)/tau : volt
"""
group = NeuronGroup(self.n, eqs, threshold='V>theta',
reset='V=Vr', refractory=taurefr)
group.V = Vr
conn = Synapses(group, group, on_pre='V += -J',
delay=self.homog_delay)
insert_benchmark_point("before_synapses_connect")
conn.connect(p=sparseness)
insert_benchmark_point("after_synapses_connect")
if self.heterog_delay is not None:
assert self.homog_delay is None
logger.info(f'Setting heterogeneous delays: "{self.heterog_delay}"')
conn.delay = self.heterog_delay
else:
logger.info(f'Setting homogeneous delays: "{self.homog_delay}"')
self.timed_run(self.duration)
class BrunelHakimHomogDelays(BrunelHakimBase):
"""
BrunelHakim with homogeneous delays from brian2 examples
"""
name = "Brunel Hakim with homogeneous delays (2 ms)"
tags = ["Neurons", "Synapses", "Delays"]
#n_range = [100, 1000, 10000, 20000, 40000, 70000, 100000, 130000, 200000, 500000, 900000]
# TITAN X
#n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, log10(912500)] #pass: 912500, fail: 925000
# A100
n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, np.log10(2100000)] #pass: 2100000 (~10**6.32), fail: 2200000
n_range = [int(10**p) for p in n_power]
# all delays 2 ms
homog_delay = 2*ms
sigmaext = 1*mV
muext = 25*mV
class BrunelHakimHeterogDelays(BrunelHakimBase):
"""
BrunelHakim with heterogeneous delays with same mean delay and
similar activity regime as brian2 example (with homogeneous delays).
"""
name = "Brunel Hakim with heterogeneous delays (uniform [0, 4] ms)"
tags = ["Neurons", "Synapses", "Delays"]
#n_range = [100, 1000, 10000, 20000, 50000, 100000, 380000] #pass: 389649, fail: 396484
# TITAN X
#n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5] #pass: 389649, fail: 396484
# A100
n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5] #pass: 450000, fail: 500000 XXX: same as TITAN X?
n_range = [int(10**p) for p in n_power]
# delays [0, 4] ms
heterog_delay = "4*ms * rand()"
# to have a similar network activity regime as for homogenous delays
# or narrow delay distribution
sigmaext = 0.33*mV
muext = 27*mV
class BrunelHakimHeterogDelaysNarrowDistr(BrunelHakimBase):
"""
BrunelHakim with heterogeneous delays with narrow delay distribution
with same mean delay and similar activity regime as brian2 example
(with homogeneous delays)
"""
name = "Brunel Hakim with heterogeneous delays (uniform 2 ms += dt)"
tags = ["Neurons", "Synapses", "Delays"]
#n_range = [100, 1000, 10000, 20000, 50000, 100000, 380000]
# TITAN X
#n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5] #pass: 423826, fail: 430661
# A100
n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6] #pass: 1350000 (~10**6.13), fail: 1500000
n_range = [int(10**p) for p in n_power]
# delays 2 ms +- dt
heterog_delay = "2*ms + 2 * dt * rand() - dt"
sigmaext = 1*mV
muext = 25*mV
class SynapsesOnlyHeterogeneousDelays(TimedSpeedTest):
category = "Synapses only with heterogeneous delays"
tags = ["Synapses"]
n_range = [100, 1000, 10000, 100000, 1000000]
n_label = 'Num neurons'
duration = 1 * second
# memory usage will be approximately p**2*rate*dt*N**2*bytes_per_synapse/1024**3 GB
# for CPU, bytes_per_synapse appears to be around 40?
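    # Worked example (illustrative): with p = 1.0, rate = 10 Hz, the default
    # dt = 0.1 ms, N = 1e6 and ~40 bytes per synapse this gives
    # 1.0**2 * 10 * 1e-4 * (1e6)**2 * 40 / 1024**3 ~= 37 GB.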
def run(self):
N = self.n
rate = self.rate
M = int(rate * N * defaultclock.dt)
if M <= 0:
M = 1
G = NeuronGroup(M, 'v:1', threshold='True')
H = NeuronGroup(N, 'w:1')
S = Synapses(G, H, on_pre='w += 1.0')
insert_benchmark_point("before_synapses_connect")
S.connect(True, p=self.p)
insert_benchmark_point("after_synapses_connect")
S.delay = '4*ms * rand()'
#M = SpikeMonitor(G)
self.timed_run(self.duration,
# report='text',
)
#plot(M.t/ms, M.i, ',k')
class DenseMediumRateSynapsesOnlyHeterogeneousDelays(SynapsesOnlyHeterogeneousDelays):
name = "Dense, medium rate"
rate = 10 * Hz
p = 1.0
n_range = [100, 1000, 10000, 100000, 200000, 462500] #fail: 468750
class SparseLowRateSynapsesOnlyHeterogeneousDelays(SynapsesOnlyHeterogeneousDelays):
name = "Sparse, low rate"
rate = 1 * Hz
p = 0.2
n_range = [100, 1000, 10000, 100000, 500000, 1000000, 3281250] #fail: 3312500
class CUBAFixedConnectivityNoMonitor(TimedSpeedTest):
category = "Full examples"
name = "CUBA fixed connectivity, no monitor"
tags = ["Neurons", "Synapses"]
n_range = [100, 1000, 10000, 100000, 500000, 1000000, 3562500] #fail: 3578125
n_label = 'Num neurons'
# configuration options
duration = 1 * second
def run(self):
N = self.n
Ne = int(.8 * N)
taum = 20 * ms
taue = 5 * ms
taui = 10 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -49 * mV
eqs = '''
dv/dt = (ge+gi-(v-El))/taum : volt (unless refractory)
dge/dt = -ge/taue : volt (unless refractory)
dgi/dt = -gi/taui : volt (unless refractory)
'''
P = NeuronGroup(
N, eqs, threshold='v>Vt', reset='v = Vr', refractory=5 * ms)
P.v = 'Vr + rand() * (Vt - Vr)'
P.ge = 0 * mV
P.gi = 0 * mV
we = (60 * 0.27 / 10) * mV # excitatory synaptic weight (voltage)
wi = (-20 * 4.5 / 10) * mV # inhibitory synaptic weight
Ce = Synapses(P, P, on_pre='ge += we')
Ci = Synapses(P, P, on_pre='gi += wi')
insert_benchmark_point("before_synapses_connect")
Ce.connect('i<Ne', p=80. / N)
Ci.connect('i>=Ne', p=80. / N)
insert_benchmark_point("after_synapses_connect")
self.timed_run(self.duration)
class STDPCUDA(TimedSpeedTest):
"""
    STDP benchmark with postsynaptic effects. On average 1000 out of N
    presynaptic Poisson neurons are randomly connected to N/1000 postsynaptic
    neurons, s.t. N is the number of synapses. STDP is implemented as synaptic
    variables, and presynaptic spikes change postsynaptic conductances.
"""
category = "Full examples"
tags = ["Neurons", "Synapses"]
n_label = 'Num neurons'
name = "STDP (event-driven, ~N neurons, N synapses)"
# TITAN X
#n_power = [3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7] #pass 11325000, fail:11520000
# A100
n_power = [3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, np.log10(19375000)] #pass 19375000 (~10**7.28), fail:20000000
n_range = [(int(10**p)//1000)*1000 for p in n_power] # needs to be multiple of 1000
# configuration options
duration = 10*second
post_effects = True
# homog delay is used in Synapses constructor (for GeNN compatibility)
homog_delay = 0*ms
# heterog delay is used to set Synapses delay attribute
heterog_delay = None
    # connectivity style (if not random, each pre neuron is connected to a
    # different set of K_poisson (1000) contiguous post neurons)
connectivity_random = False
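    # Worked example (illustrative): for n = 10000 there are 10000 Poisson
    # inputs and 10000 / 1000 = 10 postsynaptic neurons; with on average
    # K_poisson = 1000 inputs per post neuron this gives ~10 * 1000 = 10000
    # synapses, i.e. N synapses as stated in the docstring.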
def run(self):
# preference for memory saving
prefs['devices.cuda_standalone.no_pre_references'] = True
# we draw by random K_poisson out of N_poisson (on avg.) and connect
# them to each post neuron
N = self.n
N_poisson = N
K_poisson = 1000
taum = 10*ms
taupre = 20*ms
taupost = taupre
Ee = 0*mV
vt = -54*mV
vr = -60*mV
El = -74*mV
taue = 5*ms
F = 15 * Hz
gmax = .01
dApre = .01
dApost = -dApre * taupre / taupost * 1.05
dApost *= gmax
dApre *= gmax
assert K_poisson == 1000
        assert N % K_poisson == 0, f"{N} is not a multiple of {K_poisson}"
eqs_neurons = '''
dv/dt = (ge * (Ee-vr) + El - v) / taum : volt
dge/dt = -ge / taue {} : 1
'''
on_pre = ''
if self.post_effects:
logger.info("Simulating standard STDP with postsynaptic effects")
            # normal mode => poissongroup spikes have an effect on post neurons
eqs_neurons = eqs_neurons.format('')
on_pre += 'ge += w\n'
else:
logger.info("Simulating STDP without postsynaptic effects")
            # second mode => poissongroup spikes are ineffective for post neurons
# here: white noise process is added with similar mean and variance as
# poissongroup input that is disabled in this case
gsyn = K_poisson * F * gmax / 2. # assuming avg weight gmax/2 which holds approx. true for the bimodal distrib.
eqs_neurons = eqs_neurons.format('+ gsyn + sqrt(gsyn) * xi')
# eqs_neurons = eqs_neurons.format('')
on_pre += '''Apre += dApre
w = clip(w + Apost, 0, gmax)'''
input = PoissonGroup(N_poisson, rates=F)
        neurons = NeuronGroup(N // K_poisson, eqs_neurons, threshold='v>vt', reset='v = vr')
S = Synapses(input, neurons,
'''w : 1
dApre/dt = -Apre / taupre : 1 (event-driven)
dApost/dt = -Apost / taupost : 1 (event-driven)''',
on_pre=on_pre,
on_post='''Apost += dApost
w = clip(w + Apre, 0, gmax)''',
delay=self.homog_delay
)
insert_benchmark_point("before_synapses_connect")
if self.connectivity_random:
# random poisson neurons connect to a post neuron (K_poisson many on avg)
S.connect(p=float(K_poisson)/N_poisson)
else:
# contiguous K_poisson many poisson neurons connect to a post neuron
S.connect('i < (j+1)*K_poisson and i >= j*K_poisson')
insert_benchmark_point("after_synapses_connect")
S.w = 'rand() * gmax'
if self.heterog_delay is not None:
assert self.homog_delay is None
logger.info(f'Setting heterogeneous delays: "{self.heterog_delay}"')
S.delay = self.heterog_delay
else:
logger.info(f'Setting homogeneous delays: "{self.homog_delay}"')
self.timed_run(self.duration)
class STDPCUDAHomogeneousDelays(STDPCUDA):
homog_delay = 2*ms
name = "STDP (event-driven, ~N neurons, N synapses, homogeneous delays)"
class STDPCUDAHeterogeneousDelays(STDPCUDA):
homog_delay = None
heterog_delay = "2 * 2*ms * rand()"
name = "STDP (event-driven, ~N neurons, N synapses, heterogeneous delays)"
# TITAN X
#n_power = [3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7] #pass 11397000, fail:11422000
# A100
n_power = [3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, np.log10(19375000)] #pass 19375000 (~10**7.28), fail:20000000
n_range = [(int(10**p)//1000)*1000 for p in n_power] # needs to be multiple of 1000
class STDPCUDAHeterogeneousDelaysNarrowDistr(STDPCUDA):
homog_delay = None
# delays 2 ms +- dt
heterog_delay = "2*ms + 2 * dt * rand() - dt"
name = "STDP (event-driven, ~N neurons, N synapses, heterogeneous delays narrow)"
# TITAN X
#n_power = [3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7] #pass 11397000, fail:11422000
# A100
n_power = [3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, np.log10(19375000)] #pass 19375000 (~10**7.28), fail:20000000
n_range = [(int(10**p)//1000)*1000 for p in n_power] # needs to be multiple of 1000
class STDPCUDARandomConnectivityHomogeneousDelays(STDPCUDAHomogeneousDelays):
connectivity_random = True
class STDPCUDARandomConnectivityHeterogeneousDelays(STDPCUDAHeterogeneousDelays):
connectivity_random = True
class STDPCUDARandomConnectivityHeterogeneousDelaysNarrowDistr(STDPCUDAHeterogeneousDelaysNarrowDistr):
connectivity_random = True
class STDPCUDANoPostEffects(STDPCUDA):
"""
    STDP benchmark without postsynaptic effects. On average 1000 out of N
    presynaptic Poisson neurons are randomly connected to N/1000 postsynaptic
    neurons, s.t. N is the number of synapses. STDP is implemented as synaptic
    variables, and presynaptic spikes have NO effect on postsynaptic variables.
Postsynaptic neurons are driven with white noise of similar mean and
variance as the input from the presynaptic Poisson neurons would create if
they had postsynaptic effects.
"""
name = "STDP (event-driven, ~N neurons, N synapses, NO postsynaptic effects)"
post_effects = False
class STDPEventDriven(TimedSpeedTest):
category = "Full examples"
name = "STDP (event-driven)"
tags = ["Neurons", "Synapses"]
n_range = [100, 1000, 10000, 20000, 50000, 100000, 1000000, 5000000, 6542968] #fail:6562500
n_label = 'Num neurons'
# configuration options
duration = 1*second
def run(self):
N = self.n
taum = 10*ms
taupre = 20*ms
taupost = taupre
Ee = 0*mV
vt = -54*mV
vr = -60*mV
El = -74*mV
taue = 5*ms
F = 15*Hz
gmax = .01
dApre = .01
dApost = -dApre * taupre / taupost * 1.05
dApost *= gmax
dApre *= gmax
eqs_neurons = '''
dv/dt = (ge * (Ee-vr) + El - v) / taum : volt
dge/dt = -ge / taue : 1
'''
input_poisson = PoissonGroup(N, rates=F)
neurons = NeuronGroup(1, eqs_neurons, threshold='v>vt', reset='v = vr')
        # w is dimensionless ('w : 1'), so the clip bounds are dimensionless too
        S = Synapses(input_poisson, neurons,
                     '''w : 1
                        dApre/dt = -Apre / taupre : 1 (event-driven)
                        dApost/dt = -Apost / taupost : 1 (event-driven)''',
                     on_pre='''ge += w
                               Apre += dApre
                               w = clip(w + Apost, 0, gmax)''',
                     on_post='''Apost += dApost
                                w = clip(w + Apre, 0, gmax)'''
                     )
insert_benchmark_point("before_synapses_connect")
S.connect()
insert_benchmark_point("after_synapses_connect")
S.w = 'rand() * gmax'
self.timed_run(self.duration)
class MushroomBody(TimedSpeedTest):
category = "Full examples"
name = "Mushroom Body example from brian2GeNN benchmarks"
tags = ["Neurons", "Synapses"]
# TITAN X
#n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, log10(7600000)] # pass:7600000, fail: 7640000
# A100
n_power = [2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, log10(15200000)] # pass:15200000 (~10**7.18), fail: 30400000
n_range = [int(10**p) for p in n_power]
n_label = 'Num neurons'
# configuration options
duration = 10*second
def run(self):
# preference for memory saving
prefs['devices.cuda_standalone.no_pre_references'] = True
import random as py_random
# Number of neurons
N_AL = 100
N_MB = self.n
N_LB = 100
# Constants
g_Na = 7.15*uS
E_Na = 50*mV
g_K = 1.43*uS
E_K = -95*mV
g_leak = 0.0267*uS
E_leak = -63.56*mV
C = 0.3*nF
VT = -63*mV
# Those two constants are dummy constants, only used when populations only have
# either inhibitory or excitatory inputs
E_e = 0*mV
E_i = -92*mV
# Actual constants used for synapses
        # cap the effective number of iKC->eKC inputs at 10000 and scale conductances
        NKCKC = min(N_MB, 10000)
        g_scaling = max(NKCKC / 2500, 1)
tau_PN_LHI = 1*ms
tau_LHI_iKC = 3*ms
tau_PN_iKC = 2*ms
tau_iKC_eKC = 10*ms
tau_eKC_eKC = 5*ms
w_LHI_iKC = 8.75*nS
w_eKC_eKC = 75*nS
tau_pre = tau_post = 10*ms
dApre = 0.1*nS/g_scaling
dApost = -dApre
g_max = 3.75*nS/g_scaling
scale = .675
traub_miles = '''
dV/dt = -(1./C)*(g_Na*m**3.*h*(V - E_Na) +
g_K*n**4.*(V - E_K) +
g_leak*(V - E_leak) +
I_syn) : volt
dm/dt = alpha_m*(1 - m) - beta_m*m : 1
dn/dt = alpha_n*(1 - n) - beta_n*n : 1
dh/dt = alpha_h*(1 - h) - beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13.*mV-V+VT)/
(exp((13.*mV-V+VT)/(4.*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(V-VT-40.*mV)/
(exp((V-VT-40.*mV)/(5.*mV))-1.)/ms : Hz
alpha_h = 0.128*exp((17.*mV-V+VT)/(18.*mV))/ms : Hz
beta_h = 4./(1.+exp((40.*mV-V+VT)/(5.*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1.)*(15.*mV-V+VT)/
(exp((15.*mV-V+VT)/(5.*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-V+VT)/(40.*mV))/ms : Hz
'''
# Principal neurons (Antennal Lobe)
n_patterns = 10
n_repeats = int(self.duration/second*10)
p_perturb = 0.1
patterns = np.repeat(np.array([np.random.choice(N_AL, int(0.2*N_AL), replace=False) for _ in range(n_patterns)]), n_repeats, axis=0)
# Make variants of the patterns
to_replace = np.random.binomial(int(0.2*N_AL), p=p_perturb, size=n_patterns*n_repeats)
variants = []
for idx, variant in enumerate(patterns):
np.random.shuffle(variant)
if to_replace[idx] > 0:
variant = variant[:-to_replace[idx]]
new_indices = np.random.randint(N_AL, size=to_replace[idx])
variant = np.unique(np.concatenate([variant, new_indices]))
variants.append(variant)
training_size = (n_repeats-10)
training_variants = []
for p in range(n_patterns):
training_variants.extend(variants[n_repeats * p:n_repeats * p + training_size])
py_random.shuffle(training_variants)
sorted_variants = list(training_variants)
for p in range(n_patterns):
sorted_variants.extend(variants[n_repeats * p + training_size:n_repeats * (p + 1)])
spike_times = np.arange(n_patterns*n_repeats)*50*ms + 1*ms + rand(n_patterns*n_repeats)*2*ms
spike_times = spike_times.repeat([len(p) for p in sorted_variants])
spike_indices = np.concatenate(sorted_variants)
PN = SpikeGeneratorGroup(N_AL, spike_indices, spike_times)
# iKC of the mushroom body
I_syn = '''I_syn = g_PN_iKC*(V - E_e): amp
dg_PN_iKC/dt = -g_PN_iKC/tau_PN_iKC : siemens'''
eqs_iKC = Equations(traub_miles) + Equations(I_syn)
iKC = NeuronGroup(N_MB, eqs_iKC, threshold='V>0*mV', refractory='V>0*mV',
method='exponential_euler')
# eKCs of the mushroom body lobe
I_syn = '''I_syn = g_iKC_eKC*(V - E_e) + g_eKC_eKC*(V - E_i): amp
dg_iKC_eKC/dt = -g_iKC_eKC/tau_iKC_eKC : siemens
dg_eKC_eKC/dt = -g_eKC_eKC/tau_eKC_eKC : siemens'''
eqs_eKC = Equations(traub_miles) + Equations(I_syn)
eKC = NeuronGroup(N_LB, eqs_eKC, threshold='V>0*mV', refractory='V>0*mV',
method='exponential_euler')
# Synapses
PN_iKC = Synapses(PN, iKC, 'weight : siemens', on_pre='g_PN_iKC += scale*weight')
iKC_eKC = Synapses(iKC, eKC,
'''g_raw : siemens
dApre/dt = -Apre / tau_pre : siemens (event-driven)
dApost/dt = -Apost / tau_post : siemens (event-driven)
''',
on_pre='''g_iKC_eKC += g_raw
Apre += dApre
g_raw = clip(g_raw + Apost, 0*siemens, g_max)
''',
on_post='''
Apost += dApost
g_raw = clip(g_raw + Apre, 0*siemens, g_max)''',
delay=0*ms)
eKC_eKC = Synapses(eKC, eKC, on_pre='g_eKC_eKC += scale*w_eKC_eKC', delay=0*ms)
insert_benchmark_point("before_synapses_connect")
PN_iKC.connect(p=0.15)
        if N_MB > 10000:
iKC_eKC.connect(p=float(10000)/N_MB)
else:
iKC_eKC.connect()
eKC_eKC.connect()
insert_benchmark_point("after_synapses_connect")
# First set all synapses as "inactive", then set 20% to active
PN_iKC.weight = '10*nS + 1.25*nS*randn()'
iKC_eKC.g_raw = 'rand()*g_max/10/g_scaling'
iKC_eKC.g_raw['rand() < 0.2'] = '(2.5*nS + 0.5*nS*randn())/g_scaling'
iKC.V = E_leak
iKC.h = 1
iKC.m = 0
iKC.n = .5
eKC.V = E_leak
eKC.h = 1
eKC.m = 0
eKC.n = .5
#if use_spikemon:
# PN_spikes = SpikeMonitor(PN)
# iKC_spikes = SpikeMonitor(iKC)
# eKC_spikes = SpikeMonitor(eKC)
self.timed_run(self.duration)
if __name__ == '__main__':
#prefs.codegen.target = 'numpy'
ThresholderOnlyPoissonLowRate(10).run()
#show()
| brian-team/brian2cuda | brian2cuda/tests/features/speed.py | Python | gpl-2.0 | 30,610 | ["Brian", "NEURON"] | 35dd339f9aa266a2907f18cf9c65011aabfa831aa047f24bed46abb106b866b9 |
#!/usr/bin/env python
"""Module cma implements the CMA-ES (Covariance Matrix Adaptation Evolution Strategy).

CMA-ES is a stochastic optimizer for robust non-linear non-convex
derivative- and function-value-free numerical optimization.

This implementation can be used with Python versions 2.6, 2.7, 3.x.

CMA-ES searches for a minimizer (a solution x in :math:`R^n`) of an
objective function f (cost function), such that f(x) is minimal.
Regarding f, only a passably reliable ranking of the candidate
solutions in each iteration is necessary. Neither the function values
themselves nor the gradient of f need to be available or do matter
(like in the downhill simplex Nelder-Mead algorithm). Some termination
criteria however depend on actual f-values.

Two interfaces are provided:

  - function `fmin(func, x0, sigma0,...)`
        runs a complete minimization
        of the objective function func with CMA-ES.

  - class `CMAEvolutionStrategy`
        allows for minimization such that the
        control of the iteration loop remains with the user.

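A minimal ask-and-tell sketch (for illustration only; see `fmin()` and
the class documentation for the full interface)::

    import cma
    es = cma.CMAEvolutionStrategy(8 * [0.5], 0.3)
    while not es.stop():
        X = es.ask()                               # sample candidate solutions
        es.tell(X, [cma.fcts.elli(x) for x in X])  # pass their f-values
    print(es.result()[0])                          # best evaluated solution
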
Used packages:

    - unavoidable: `numpy` (see `barecmaes2.py` if `numpy` is not
      available),
    - avoidable with small changes: `time`, `sys`
    - optional: `matplotlib.pyplot` (for `plot` etc., highly
      recommended), `pprint` (pretty print), `pickle` (in class
      `Sections`), `doctest`, `inspect`, `pygsl` (never by default)

Install
-------
The file ``cma.py`` only needs to be visible in the python path (e.g. in
the current working directory), but can also be installed from the
terminal command line by::

    python cma.py --install

which solely calls the ``setup`` function from the standard
``distutils.core`` package for installation. If the ``setup.py``
file is provided with ``cma.py``, the standard call is

    python setup.py cma

Both calls need to see ``cma.py`` in the current working directory and
might need to be preceded with ``sudo``.

We can install or upgrade the currently installed version also with::

    pip install --upgrade cma

Testing
-------
From the system shell::

    python cma.py --test

or from the Python shell ``ipython -pylab``::

    run cma.py --test

or from any python shell

    import cma
    cma.main('--test')

runs ``doctest.testmod(cma)`` showing only exceptions (and not the
tests that fail due to small differences in the output) and should
run without complaints in between 20 and 100 seconds.

Example
-------
From a python shell::

    import cma
    help(cma)  # "this" help message, use cma? in ipython
    help(cma.fmin)
    help(cma.CMAEvolutionStrategy)
    help(cma.CMAOptions)
    cma.CMAOptions('tol')  # display 'tolerance' termination options
    cma.CMAOptions('verb')  # display verbosity options
    res = cma.fmin(cma.Fcts.tablet, 15 * [1], 1)
    res[0]  # best evaluated solution
    res[5]  # mean solution, presumably better with noise

:See: `fmin()`, `CMAOptions`, `CMAEvolutionStrategy`

:Author: Nikolaus Hansen, 2008-2014

:License: MIT, see below.

"""

# The MIT License (MIT)
# Copyright (c) 2014 Inria
# Author: Nikolaus Hansen, 2008-2014
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright and authorship notice and this permission notice
# shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# (note to self) for testing:
#   pyflakes cma.py   # finds bugs by static analysis
#   pychecker --limit 60 cma.py  # also executes, all 60 warnings checked
#   or python ~/Downloads/pychecker-0.8.19/pychecker/checker.py cma.py
#   python cma.py -t -quiet  # executes implemented tests based on doctest
#   python -3 cma.py --test 2> out2to3warnings.txt #

# to create a html documentation file:
#    pydoc -w cma  # edit the header (remove local pointers)
#    epydoc cma.py  # comes close to javadoc but does not find the
#                   # links of function references etc
#    doxygen needs @package cma as first line in the module docstring
#       some things like class attributes are not interpreted correctly
#    sphinx: doc style of doc.python.org, could not make it work (yet)

# TODO: should optimize return self? Disadvantage: getting a hand on
#       the logger reference becomes very obscure in a one-line call.
# TODO: separate display and logging options, those CMAEvolutionStrategy
#       instances don't use them themselves (probably all)
# TODO: disp method is implemented in CMAEvolutionStrategy and in
#       CMADataLogger separately, OOOptimizer.disp_str should return a str
#       which can be used uniformly?
# TODO: check scitools.easyviz and how big the adaptation would be
# TODO: split tell into a variable transformation part and the "pure"
#       functionality
#       usecase: es.tell_geno(X, [func(es.pheno(x)) for x in X])
#       genotypic repair is not part of tell_geno
# TODO: copy_always optional parameter does not make much sense,
#       as one can always copy the input argument first,
#       however some calls are simpler
# TODO: generalize input logger in optimize() as after_iteration_handler
#       (which is logger.add by default)? One difficulty is that
#       the logger object is returned (not anymore when return of optimize
#       is changed). Another difficulty is the obscure usage of modulo
#       for writing a final data line in optimize.
# TODO: separate initialize==reset_state from __init__
# TODO: introduce Ypos == diffC which makes the code more consistent and
#       the active update "exact"?
# TODO: dynamically read "signals" from a file, see myproperties.py
#       (to be called after tell())
#
# typical parameters in scipy.optimize: disp, xtol, ftol, maxiter, maxfun,
#         callback=None
#         maxfev, diag (a sequence of N positive entries that serve as
#             scale factors for the variables.)
#         full_output -- non-zero to return all optional outputs.
#         If xtol < 0.0, xtol is set to sqrt(machine_precision)
#         'infot -- a dictionary of optional outputs with the keys:
#                   'nfev': the number of function calls...
#
#    see eg fmin_powell
# typical returns
#        x, f, dictionary d
#        (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag},
#         <allvecs>)
#
# TODO: keep best ten solutions
# TODO: implement constraints handling
# TODO: extend function unitdoctest, or use unittest?
# TODO: apply style guide
# TODO: eigh(): thorough testing would not hurt

# changes:
# 14/05/07: added method result_pretty to pretty print optimization result
# 14/05/06: associated show() everywhere with ion() which should solve the
#           blocked terminal problem
# 14/05/05: all instances of "unicode" removed (was incompatible with 3.x)
# 14/05/05: replaced type(x) == y with isinstance(x, y), reorganized the
#           comments before the code starts
# 14/05/xx: changed the order of kwargs of OOOptimizer.optimize,
#           removed prepare method in AdaptSigma classes, various changes/cleaning
# 14/03/01: bug fix BoundaryHandlerBase.has_bounds didn't check lower bounds correctly
#           bug fix in BoundPenalty.repair len(bounds[0]) was used instead of len(bounds[1])
#           bug fix in GenoPheno.pheno, where x was not copied when only boundary-repair was applied
# 14/02/27: bug fixed when BoundPenalty was combined with fixed variables.
# 13/xx/xx: step-size adaptation becomes a class derived from CMAAdaptSigmaBase,
#           to make testing different adaptation rules (much) easier
# 12/12/14: separated CMAOptions and arguments to fmin
# 12/10/25: removed useless check_points from fmin interface
# 12/10/17: bug fix printing number of infeasible samples, moved not-in-use methods
#           timesCroot and divCroot to the right class
# 12/10/16 (0.92.00): various changes commit: bug bound[0] -> bounds[0], more_to_write fixed,
#           sigma_vec introduced, restart from elitist, trace normalization, max(mu,popsize/2)
#           is used for weight calculation.
# 12/07/23: (bug:) BoundPenalty.update respects now genotype-phenotype transformation
# 12/07/21: convert value True for noisehandling into 1 making the output compatible
# 12/01/30: class Solution and more old stuff removed r3101
# 12/01/29: class Solution is deprecated, GenoPheno and SolutionDict do the job (v0.91.00, r3100)
# 12/01/06: CMA_eigenmethod option now takes a function (integer still works)
# 11/09/30: flat fitness termination checks also history length
# 11/09/30: elitist option (using method clip_or_fit_solutions)
# 11/09/xx: method clip_or_fit_solutions for check_points option for all sorts of
#           injected or modified solutions and even reliable adaptive encoding
# 11/08/19: fixed: scaling and typical_x type clashes 1 vs array(1) vs ones(dim) vs dim * [1]
# 11/07/25: fixed: fmin wrote first and last line even with verb_log==0
#           fixed: method settableOptionsList, also renamed to versatileOptions
#           default seed depends on time now
# 11/07/xx (0.9.92): added: active CMA, selective mirrored sampling, noise/uncertainty handling
#           fixed: output argument ordering in fmin, print now only used as function
#           removed: parallel option in fmin
# 11/07/01: another try to get rid of the memory leak by replacing self.unrepaired = self[:]
# 11/07/01: major clean-up and reworking of abstract base classes and of the documentation,
#           also the return value of fmin changed and attribute stop is now a method.
# 11/04/22: bug-fix: option fixed_variables in combination with scaling
# 11/04/21: stopdict is not a copy anymore
# 11/04/15: option fixed_variables implemented
# 11/03/23: bug-fix: boundary update was computed even without boundaries
# 11/03/12: bug-fix of variable annotation in plots
# 11/02/05: work around a memory leak in numpy
# 11/02/05: plotting routines improved
# 10/10/17: cleaning up, now version 0.9.30
# 10/10/17: bug-fix: return values of fmin now use phenotype (relevant
#           if input scaling_of_variables is given)
# 08/10/01: option evalparallel introduced,
#           bug-fix for scaling being a vector
# 08/09/26: option CMAseparable becomes CMA_diagonal
# 08/10/18: some names change, test functions go into a class
# 08/10/24: more refactoring
# 10/03/09: upper bound exp(min(1,...)) for step-size control

from __future__ import division
# future is >= 3.0, this code has mainly been used with 2.6 & 2.7
from __future__ import with_statement
# only necessary for python 2.5 and not in heavy use
from __future__ import print_function
# available from python 2.6, code should also work without
from __future__ import absolute_import
from __future__ import unicode_literals
# from __future__ import collections.MutableMapping
# does not exist in future, otherwise Python 2.5 would work, since 0.91.01

import sys
if not sys.version.startswith('2'):  # in python 3
    xrange = range
    raw_input = input
    basestring = str
import time  # not really essential
import collections
import numpy as np
# arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh,
# sort, argsort, random, ones,...
from numpy import inf, array, dot, exp, log, sqrt, sum
# to access the built-in sum fct: ``__builtins__.sum`` or ``del sum``
# removes the imported sum and recovers the shadowed built-in
try:
    import matplotlib
    import matplotlib.pyplot as pyplot  # also: use ipython -pyplot
    savefig = pyplot.savefig  # now we can use cma.savefig() etc
    closefig = pyplot.close
    def show():
        # is_interactive = matplotlib.is_interactive()
        pyplot.ion()
        pyplot.show()
        # if we call now matplotlib.interactive(True), the console is
        # blocked
    pyplot.ion()  # prevents that execution stops after plotting
except:
    pyplot = None
    savefig = None
    closefig = None
    def show():
        print('pyplot.show() is not available')
    print('Could not import matplotlib.pyplot, therefore ``cma.plot()``'
          ' etc. is not available')

__author__ = 'Nikolaus Hansen'
__version__ = "1.0.07 $Revision: 3809 $ $Date: 2014-05-08 02:37:39 +0200 (Thu, 08 May 2014) $"
# $Source$  # according to PEP 8 style guides, but what is it good for?
# $Id: cma.py 3809 2014-05-08 00:37:39Z hansen $
# bash $: svn propset svn:keywords 'Date Revision Id' cma.py

__docformat__ = "reStructuredText"  # this hides some comments entirely?
__all__ = (
    'main',
    'fmin',
    'fcts',
    'Fcts',
    'felli',
    'rotate',
    'pprint',
    'plot',
    'disp',
    'show',
    'savefig',
    'closefig',
    'use_archives',
    'is_feasible',
    'unitdoctest',
    'DerivedDictBase',
    'SolutionDict',
    'CMASolutionDict',
    'BestSolution',
    'BoundaryHandlerBase',
    'BoundNone',
    'BoundTransform',
    'BoundPenalty',
    'BoxConstraintsTransformationBase',
    'BoxConstraintsLinQuadTransformation',
    'GenoPheno',
    'OOOptimizer',
    'CMAEvolutionStrategy',
    'CMAOptions',
    'CMASolutionDict',
    'CMAAdaptSigmaBase',
    'CMAAdaptSigmaNone',
    'CMAAdaptSigmaDistanceProportional',
    'CMAAdaptSigmaCSA',
    'CMAAdaptSigmaTPA',
    'CMAAdaptSigmaMedianImprovement',
    'BaseDataLogger',
    'CMADataLogger',
    'DEAPCMADataLogger',
    'NoiseHandler',
    'Sections',
    'Misc',
    'Mh',
    'Rotation',
    'FitnessFunctions'
)
use_archives = True
# speed up for very large population size, prevents the need for an
# inverse gp-transformation, relies on collections module
# not sure what happens if set to False


# emptysets = ('', (), [], {})
# array([]) does not work but np.size(.) == 0
# here is the problem:
# bool(array([0])) is False
# bool(list(array([0]))) is True
# bool(list(array([0, 1]))) is True
# bool(array([0, 1])) raises ValueError
#
# "x in emptysets" cannot be well replaced by "not x"
# which is also True for array([]) and None, but also for 0 and False,
# and False for NaN, and an exception for array([0,1]), see also
# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html#True/False_evaluations

# ____________________________________________________________
# ____________________________________________________________
#
def rglen(ar):
    """shortcut for the iterator ``xrange(len(ar))``"""
    return xrange(len(ar))

def is_feasible(x, f):
    """default to check feasibility, see also ``cma_default_options``"""
    return f is not None and f is not np.NaN

def _print_warning(msg, method_name=None, class_name=None, iteration=None,
                   verbose=1):
    if verbose > 0:
        print('WARNING (module=' + __name__ +
              (', class=' + str(class_name) if class_name else '') +
              (', method=' + str(method_name) if method_name else '') +
              (', iteration=' + str(iteration) if iteration else '') +
              '): ' + msg)

# ____________________________________________________________
# ____________________________________________________________
#
def unitdoctest():
    """is used to describe test cases and might in future become helpful
    as an experimental tutorial as well. The main testing feature at the
    moment is by doctest with ``cma._test()`` or conveniently by
    ``python cma.py --test``. With the ``--verbose`` option added, the
    results will always slightly differ and many "failed" test cases
    might be reported.

    A simple first overall test:
        >>> import cma
        >>> res = cma.fmin(cma.fcts.elli, 3*[1], 1,
        ...                {'CMA_diagonal':2, 'seed':1, 'verb_time':0})
        (3_w,7)-CMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=1)
        Covariance matrix is diagonal for 2 iterations (1/ccov=7.0)
        Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
            1       7 1.453161670768570e+04 1.2e+00 1.08e+00  1e+00  1e+00
            2      14 3.281197961927601e+04 1.3e+00 1.22e+00  1e+00  2e+00
            3      21 1.082851071704020e+04 1.3e+00 1.24e+00  1e+00  2e+00
          100     700 8.544042012075362e+00 1.4e+02 3.18e-01  1e-03  2e-01
          200    1400 5.691152415221861e-12 1.0e+03 3.82e-05  1e-09  1e-06
          220    1540 3.890107746209078e-15 9.5e+02 4.56e-06  8e-11  7e-08
        termination on tolfun : 1e-11
        final/bestever f-value = 3.89010774621e-15 2.52273602735e-15
        mean solution:  [ -4.63614606e-08  -3.42761465e-10   1.59957987e-11]
        std deviation: [  6.96066282e-08   2.28704425e-09   7.63875911e-11]

    Test on the Rosenbrock function with 3 restarts. The first trial only
    finds the local optimum, which happens in about 20% of the cases.

        >>> import cma
        >>> res = cma.fmin(cma.fcts.rosen, 4*[-1], 1,
        ...                {'ftarget':1e-6, 'restarts':3,
        ...                 'verb_time':0, 'verb_disp':500, 'seed':3})
        (4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=3)
        Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
            1       8 4.875315645656848e+01 1.0e+00 8.43e-01  8e-01  8e-01
            2      16 1.662319948123120e+02 1.1e+00 7.67e-01  7e-01  8e-01
            3      24 6.747063604799602e+01 1.2e+00 7.08e-01  6e-01  7e-01
          184    1472 3.701428610430019e+00 4.3e+01 9.41e-07  3e-08  5e-08
        termination on tolfun : 1e-11
        final/bestever f-value = 3.70142861043 3.70142861043
        mean solution: [-0.77565922  0.61309336  0.38206284  0.14597202]
        std deviation: [  2.54211502e-08   3.88803698e-08   4.74481641e-08   3.64398108e-08]
        (8_w,16)-CMA-ES (mu_w=4.8,w_1=32%) in dimension 4 (seed=4)
        Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
            1    1489 2.011376859371495e+02 1.0e+00 8.90e-01  8e-01  9e-01
            2    1505 4.157106647905128e+01 1.1e+00 8.02e-01  7e-01  7e-01
            3    1521 3.548184889359060e+01 1.1e+00 1.02e+00  8e-01  1e+00
          111    3249 6.831867555502181e-07 5.1e+01 2.62e-02  2e-04  2e-03
        termination on ftarget : 1e-06
        final/bestever f-value = 6.8318675555e-07 1.18576673231e-07
        mean solution: [ 0.99997004  0.99993938  0.99984868  0.99969505]
        std deviation: [ 0.00018973  0.00038006  0.00076479  0.00151402]
        >>> assert res[1] <= 1e-6

    Notice the different termination conditions. Termination on the target
    function value ftarget prevents further restarts.

    Test of scaling_of_variables option

        >>> import cma
        >>> opts = cma.CMAOptions()
        >>> opts['seed'] = 456
        >>> opts['verb_disp'] = 0
        >>> opts['CMA_active'] = 1
        >>> # rescaling of third variable: for searching in roughly
        >>> #   x0 plus/minus 1e3*sigma0 (instead of plus/minus sigma0)
        >>> opts.scaling_of_variables = [1, 1, 1e3, 1]
        >>> res = cma.fmin(cma.fcts.rosen, 4 * [0.1], 0.1, opts)
        termination on tolfun : 1e-11
        final/bestever f-value = 2.68096173031e-14 1.09714829146e-14
        mean solution:  [ 1.00000001  1.00000002  1.00000004  1.00000007]
        std deviation: [  3.00466854e-08   5.88400826e-08   1.18482371e-07   2.34837383e-07]

    The printed std deviations reflect the actual true value (not the one
    in the internal representation which would be different).

        >>> import cma
        >>> r = cma.fmin(cma.fcts.diffpow, 15 * [1], 1,
        ...              {'CMA_dampsvec_fac':0.5, 'ftarget':1e-9})
        >>> assert(r[1] < 1e-9)
        >>> assert(r[2] < 13000)  # only passed with CMA_dampsvec_fac


    :See: cma.main(), cma._test()

    """

    pass

# ____________________________________________________________
# ____________________________________________________________
#
class _BlancClass(object):
    """blanc container class for having a collection of attributes"""

# _____________________________________________________________________
# _____________________________________________________________________
#
class DerivedDictBase(collections.MutableMapping):
    """for conveniently adding "features" to a dictionary. The actual
    dictionary is in ``self.data``. Copy-paste
    and modify setitem, getitem, and delitem, if necessary"""
    def __init__(self, *args, **kwargs):
        # collections.MutableMapping.__init__(self)
        super(DerivedDictBase, self).__init__()
        # super(SolutionDict, self).__init__()  # the same
        self.data = dict(*args, **kwargs)
    def __len__(self):
        return len(self.data)
    def __contains__(self, value):
        return value in self.data
    def __iter__(self):
        return iter(self.data)
    def __setitem__(self, key, value):
        """defines self[key] = value"""
        self.data[key] = value
    def __getitem__(self, key):
        """defines self[key]"""
        return self.data[key]
    def __delitem__(self, key):
        del self.data[key]

class SolutionDict(DerivedDictBase):
    """dictionary with computation of an hash key.

    The hash key is generated from the inserted solution and a stack of
    previously inserted same solutions is provided. Each entry is meant
    to store additional information related to the solution.

        >>> import cma, numpy as np
        >>> d = cma.SolutionDict()
        >>> x = np.array([1,2,4])
        >>> d[x] = {'f': sum(x**2), 'iteration': 1}
        >>> assert d[x]['iteration'] == 1
        >>> assert d.get(x) == (d[x] if d.key(x) in d.keys() else None)

    TODO: data_with_same_key behaves like a stack (see setitem and
    delitem), but rather should behave like a queue?! A queue is less
    consistent with the operation self[key] = ..., if
    self.data_with_same_key[key] is not empty.

    TODO: iteration key is used to clean up without error management

    """
    def __init__(self, *args, **kwargs):
        DerivedDictBase.__init__(self, *args, **kwargs)
        self.data_with_same_key = {}
        self.last_iteration = 0
    def key(self, x):
        try:
            return tuple(x)
            # using sum(x) is slower, using x[0] is slightly faster
        except TypeError:
            return x
    def __setitem__(self, key, value):
        """defines self[key] = value"""
        key = self.key(key)
        if key in self.data_with_same_key:
            self.data_with_same_key[key] += [self.data[key]]
        elif key in self.data:
            self.data_with_same_key[key] = [self.data[key]]
        self.data[key] = value
    def __getitem__(self, key):  # 50% of time of
        """defines self[key]"""
        return self.data[self.key(key)]
    def __delitem__(self, key):
        """remove only most current key-entry"""
        key = self.key(key)
        if key in self.data_with_same_key:
            if len(self.data_with_same_key[key]) == 1:
                self.data[key] = self.data_with_same_key.pop(key)[0]
            else:
                self.data[key] = self.data_with_same_key[key].pop(-1)
        else:
            del self.data[key]
    def truncate(self, max_len, min_iter):
        if len(self) > max_len:
            for k in list(self.keys()):
                if self[k]['iteration'] < min_iter:
                    del self[k]
                    # deletes one item with k as key, better delete all?

class CMASolutionDict(SolutionDict):
    def __init__(self, *args, **kwargs):
        SolutionDict.__init__(self, *args, **kwargs)
        self.last_solution_index = 0

    # TODO: insert takes 30% of the overall CPU time, mostly in def key()
    #       with about 15% of the overall CPU time
    def insert(self, key, geno=None, iteration=None, fitness=None, value=None):
        """insert an entry with key ``key`` and value
        ``value if value is not None else {'geno':key}`` and
        ``self[key]['kwarg'] = kwarg if kwarg is not None`` for the further kwargs.

        """
        # archive returned solutions, first clean up archive
        if iteration is not None and iteration > self.last_iteration and (iteration % 10) < 1:
            self.truncate(300, iteration - 3)
        elif value is not None and value.get('iteration'):
            iteration = value['iteration']
            if (iteration % 10) < 1:
                self.truncate(300, iteration - 3)

        self.last_solution_index += 1
        if value is not None:
            try:
                iteration = value['iteration']
            except:
                pass
        if iteration is not None:
            if iteration > self.last_iteration:
                self.last_solution_index = 0
            self.last_iteration = iteration
        else:
            iteration = self.last_iteration + 0.5  # a hack to get a somewhat reasonable value
        if value is not None:
            self[key] = value
        else:
            self[key] = {'pheno': key}
        if geno is not None:
            self[key]['geno'] = geno
        if iteration is not None:
            self[key]['iteration'] = iteration
        if fitness is not None:
            self[key]['fitness'] = fitness
        return self[key]

if not use_archives:
    class CMASolutionDict(SolutionDict):
        def insert(self, *args, **kwargs):
            pass

class BestSolution(object):
    """container to keep track of the best solution seen"""
    def __init__(self, x=None, f=np.inf, evals=None):
        """initialize the best solution with `x`, `f`, and `evals`.
        Better solutions have smaller `f`-values.

        """
        self.x = x
        self.x_geno = None
        self.f = f if f is not None and f is not np.nan else np.inf
        self.evals = evals
        self.evalsall = evals
        self.last = _BlancClass()
        self.last.x = x
        self.last.f = f
    def update(self, arx, xarchive=None, arf=None, evals=None):
        """checks for better solutions in list `arx`.

        Based on the smallest corresponding value in `arf`,
        alternatively, `update` may be called with a `BestSolution`
        instance like ``update(another_best_solution)`` in which case
        the better solution becomes the current best.

        `xarchive` is used to retrieve the genotype of a solution.

        """
        if isinstance(arx, BestSolution):
            if self.evalsall is None:
                self.evalsall = arx.evalsall
            elif arx.evalsall is not None:
                self.evalsall = max((self.evalsall, arx.evalsall))
            if arx.f is not None and arx.f < np.inf:
                self.update([arx.x], xarchive, [arx.f], arx.evals)
            return self
        assert arf is not None
        # find fail-safe minimum
        try:
            minidx = np.nanargmin(arf)
        except ValueError:  # all values are nan
            return
        minarf = arf[minidx]
        # minarf = reduce(lambda x, y: y if y and y is not np.nan
        #                 and y < x else x, arf, np.inf)
        if minarf < np.inf and (minarf < self.f or self.f is None):
            self.x, self.f = arx[minidx], arf[minidx]
            if xarchive is not None and xarchive.get(self.x) is not None:
                self.x_geno = xarchive[self.x].get('geno')
            else:
                self.x_geno = None
            self.evals = None if not evals else evals - len(arf) + minidx + 1
            self.evalsall = evals
        elif evals:
            self.evalsall = evals
        self.last.x = arx[minidx]
        self.last.f = minarf
    def get(self):
        """return ``(x, f, evals)`` """
        return self.x, self.f, self.evals  # , self.x_geno

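# Illustrative sketch of the `BestSolution` bookkeeping (values made up):
#
# >>> best = BestSolution()
# >>> best.update([[1.0], [2.0]], arf=[3.0, 1.5], evals=10)
# >>> best.get()       # the smaller f-value wins
# ([2.0], 1.5, 10)
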

# ____________________________________________________________
# ____________________________________________________________
#
class BoundaryHandlerBase(object):
    """hacked base class """
    def __init__(self, bounds):
        """bounds are not copied, but possibly modified and
        put into a normalized form: ``bounds`` can be ``None``
        or ``[lb, ub]`` where ``lb`` and ``ub`` are
        either None or a vector (which can have ``None`` entries).

        Generally, the last entry is recycled to compute bounds
        for any dimension.

        """
        if not bounds:
            self.bounds = None
        else:
            l = [None, None]  # figure out lengths
            for i in [0, 1]:
                try:
                    l[i] = len(bounds[i])
                except TypeError:
                    bounds[i] = [bounds[i]]
                    l[i] = 1
                if all([bounds[i][j] is None or not np.isfinite(bounds[i][j])
                        for j in rglen(bounds[i])]):
                    bounds[i] = None
                if bounds[i] is not None and any([bounds[i][j] == (-1)**i * np.inf
                                                  for j in rglen(bounds[i])]):
                    raise ValueError('lower/upper is +inf/-inf and ' +
                                     'therefore no finite feasible solution is available')
            self.bounds = bounds

    def __call__(self, solutions, *args, **kwargs):
        """return penalty or list of penalties, by default zero(s).

        This interface seems too specifically tailored to the derived
        BoundPenalty class, it should maybe change.

        """
        if np.isscalar(solutions[0]):
            return 0.0
        else:
            return len(solutions) * [0.0]

    def update(self, *args, **kwargs):
        return self
    def repair(self, x, copy_if_changed=True, copy_always=False):
        """projects infeasible values on the domain bound, might be
        overwritten by derived class """
        if copy_always:
            x = array(x, copy=True)
            copy = False
        else:
            copy = copy_if_changed
        if self.bounds is None:
            return x
        for ib in [0, 1]:
            if self.bounds[ib] is None:
                continue
            for i in rglen(x):
                idx = min([i, len(self.bounds[ib]) - 1])
                if self.bounds[ib][idx] is not None and \
                        (-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]:
                    if copy:
                        x = array(x, copy=True)
                        copy = False
                    x[i] = self.bounds[ib][idx]
        return x

    def inverse(self, y, copy_if_changed=True, copy_always=False):
        return y if not copy_always else array(y, copy=True)

    def get_bounds(self, which, dimension):
        """``get_bounds('lower', 8)`` returns the lower bounds in 8-D"""
        if which == 'lower' or which == 0:
            return self._get_bounds(0, dimension)
        elif which == 'upper' or which == 1:
            return self._get_bounds(1, dimension)
        else:
            raise ValueError("argument which must be 'lower' or 'upper'")

    def _get_bounds(self, ib, dimension):
        """ib == 0/1 means lower/upper bound, return a vector of length
        `dimension` """
        sign_ = 2 * ib - 1
        assert sign_**2 == 1
        if self.bounds is None or self.bounds[ib] is None:
            return array(dimension * [sign_ * np.Inf])
        res = []
        for i in xrange(dimension):
            res.append(self.bounds[ib][min([i, len(self.bounds[ib]) - 1])])
            if res[-1] is None:
                res[-1] = sign_ * np.Inf
        return array(res)

    def has_bounds(self):
        """return True, if any variable is bounded"""
        bounds = self.bounds
        if bounds in (None, [None, None]):
            return False
        for ib, bound in enumerate(bounds):
            if bound is not None:
                sign_ = 2 * ib - 1
                for bound_i in bound:
                    if bound_i is not None and sign_ * bound_i < np.inf:
                        return True
        return False

    def is_in_bounds(self, x):
        """not yet tested"""
        if self.bounds is None:
            return True
        for ib in [0, 1]:
            if self.bounds[ib] is None:
                continue
            for i in rglen(x):
                idx = min([i, len(self.bounds[ib]) - 1])
                if self.bounds[ib][idx] is not None and \
                        (-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]:
                    return False
        return True

    def to_dim_times_two(self, bounds):
        """return boundaries in format ``[[lb0, ub0], [lb1, ub1], ...]``,
        as used by ``BoxConstraints...`` class.

        """
        if not bounds:
            b = [[None, None]]
        else:
            l = [None, None]  # figure out lengths
            for i in [0, 1]:
                try:
                    l[i] = len(bounds[i])
                except TypeError:
                    bounds[i] = [bounds[i]]
                    l[i] = 1
            b = []  # bounds in different format
            try:
                for i in xrange(max(l)):
                    b.append([bounds[0][i] if i < l[0] else None,
                              bounds[1][i] if i < l[1] else None])
            except (TypeError, IndexError):
                print("boundaries must be provided in the form " +
                      "[scalar_or_vector, scalar_or_vector]")
                raise
        return b

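# Illustrative sketch of the normalized bounds representation (the last
# entry of each bound vector is recycled for higher dimensions):
#
# >>> bh = BoundaryHandlerBase([0, [1, 2]])
# >>> list(bh.get_bounds('lower', 3))
# [0, 0, 0]
# >>> list(bh.get_bounds('upper', 3))
# [1, 2, 2]
# >>> bh.has_bounds()
# True
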
# ____________________________________________________________
# ____________________________________________________________
#
class BoundNone(BoundaryHandlerBase):
    def __init__(self, bounds=None):
        if bounds is not None:
            raise ValueError()
        BoundaryHandlerBase.__init__(self, None)
    def is_in_bounds(self, x):
        return True

# ____________________________________________________________
# ____________________________________________________________
#
class BoundTransform(BoundaryHandlerBase):
    """Handles boundary by a smooth, piecewise linear and quadratic
    transformation into the feasible domain.

    >>> import cma
    >>> b = cma.BoundTransform([None, 1])
    >>> assert b.bounds == [[None], [1]]
    >>> assert cma.Mh.vequals_approximately(b.repair([0, 1, 1.2]),
    ...                                     array([ 0., 0.975, 0.975]))
    >>> assert b.is_in_bounds([0, 0.5, 1])
    >>> assert cma.Mh.vequals_approximately(b.transform([0, 1, 2]),
    ...                                     [ 0. , 0.975, 0.2 ])
    >>> o = cma.fmin(cma.fcts.sphere, 6 * [-2], 0.5, options={
    ...     'boundary_handling': 'BoundTransform',
    ...     'bounds': [[], 5 * [-1] + [inf]] })
    >>> assert o[1] < 5 + 1e-8

    Details: this class uses ``class BoxConstraintsLinQuadTransformation``

    """
    def __init__(self, bounds=None):
        """Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
        are lower and upper domain boundaries, each is either `None` or
        a scalar or a list or array of appropriate size.

        """
        BoundaryHandlerBase.__init__(self, bounds)
        self.bounds_tf = BoxConstraintsLinQuadTransformation(self.to_dim_times_two(bounds))

    def repair(self, x, copy_if_changed=True, copy_always=False):
        """transforms ``x`` into the bounded domain.

        ``copy_always`` option might disappear.

        """
        copy = copy_if_changed
        if copy_always:
            x = array(x, copy=True)
            copy = False
        if self.bounds is None or (self.bounds[0] is None and
                                   self.bounds[1] is None):
            return x
        return self.bounds_tf(x, copy)

    def transform(self, x):
        return self.repair(x)

    def inverse(self, x, copy_if_changed=True, copy_always=False):
        """inverse transform of ``x`` from the bounded domain.

        """
        copy = copy_if_changed
        if copy_always:
            x = array(x, copy=True)
            copy = False
        if self.bounds is None or (self.bounds[0] is None and
                                   self.bounds[1] is None):
            return x
        return self.bounds_tf.inverse(x, copy)  # inherited from BoxConstraintsTransformationBase

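# Illustrative sketch complementing the doctest in the class string above
# (made-up values): inside [lb + al, ub - au] the transformation is the
# identity, infeasible values are mapped smoothly into [lb, ub].
#
# >>> b = BoundTransform([0, 1])
# >>> assert b.repair([0.5])[0] == 0.5         # identity well inside the box
# >>> assert 0 <= b.repair([-0.2])[0] <= 1     # out-of-bounds values land in [0, 1]
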
# ____________________________________________________________
# ____________________________________________________________
#
class BoundPenalty(BoundaryHandlerBase):
    """Computes the boundary penalty. Must be updated each iteration,
    using the `update` method.

    Details
    -------
    The penalty computes like ``sum(w[i] * (x[i]-xfeas[i])**2)``,
    where `xfeas` is the closest feasible (in-bounds) solution from `x`.
    The weight `w[i]` should be updated during each iteration using
    the update method.

    Example:

    >>> import cma
    >>> cma.fmin(cma.felli, 6 * [1], 1,
    ...          {
    ...              'boundary_handling': 'BoundPenalty',
    ...              'bounds': [-1, 1],
    ...              'fixed_variables': {0: 0.012, 2: 0.234}
    ...          })

    Reference: Hansen et al 2009, A Method for Handling Uncertainty...
    IEEE TEC, with addendum, see
    http://www.lri.fr/~hansen/TEC2009online.pdf

    """
    def __init__(self, bounds=None):
        """Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
        are lower and upper domain boundaries, each is either `None` or
        a scalar or a list or array of appropriate size.
        """
        # #
        # bounds attribute reminds the domain boundary values
        BoundaryHandlerBase.__init__(self, bounds)

        self.gamma = 1  # a very crude assumption
        self.weights_initialized = False  # gamma becomes a vector after initialization
        self.hist = []  # delta-f history

    def repair(self, x, copy_if_changed=True, copy_always=False):
        """sets out-of-bounds components of ``x`` on the bounds.

        """
        # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
        # remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
        copy = copy_if_changed
        if copy_always:
            x = array(x, copy=True)
        bounds = self.bounds
        if bounds not in (None, [None, None], (None, None)):  # solely for efficiency
            x = array(x, copy=True) if copy and not copy_always else x
            if bounds[0] is not None:
                if np.isscalar(bounds[0]):
                    for i in rglen(x):
                        x[i] = max((bounds[0], x[i]))
                else:
                    for i in rglen(x):
                        j = min([i, len(bounds[0]) - 1])
                        if bounds[0][j] is not None:
                            x[i] = max((bounds[0][j], x[i]))
            if bounds[1] is not None:
                if np.isscalar(bounds[1]):
                    for i in rglen(x):
                        x[i] = min((bounds[1], x[i]))
                else:
                    for i in rglen(x):
                        j = min((i, len(bounds[1]) - 1))
                        if bounds[1][j] is not None:
                            x[i] = min((bounds[1][j], x[i]))
        return x

    # ____________________________________________________________
    #
    def __call__(self, x, archive, gp):
        """returns the boundary violation penalty for `x`, where `x` is a
        single solution or a list or array of solutions.

        """
        if x in (None, (), []):
            return x
        if self.bounds in (None, [None, None], (None, None)):
            return 0.0 if np.isscalar(x[0]) else [0.0] * len(x)  # no penalty

        x_is_single_vector = np.isscalar(x[0])
        x = [x] if x_is_single_vector else x

        # add fixed variables to self.gamma
        try:
            gamma = list(self.gamma)  # fails if self.gamma is a scalar
            for i in sorted(gp.fixed_values):  # fails if fixed_values is None
                gamma.insert(i, 0.0)
            gamma = array(gamma, copy=False)
        except TypeError:
            gamma = self.gamma
        pen = []
        for xi in x:
            # CAVE: this does not work with already repaired values!!
            # CPU(N,lam,iter=20,200,100)?: 3s of 10s, array(xi): 1s
            # remark: one deep copy can be prevented by xold = xi first
            xpheno = gp.pheno(archive[xi]['geno'])
            # necessary, because xi was repaired to be in bounds
            xinbounds = self.repair(xpheno)
            # could be omitted (with unpredictable effect in case of external repair)
            fac = 1  # exp(0.1 * (log(self.scal) - np.mean(self.scal)))
            pen.append(sum(gamma * ((xinbounds - xpheno) / fac)**2) / len(xi))
        return pen[0] if x_is_single_vector else pen

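    # Worked sketch of the penalty formula from the class docstring, assuming
    # gamma == [1, 1], bounds [-1, 1] and a phenotype x = [1.5, 0.2]: the
    # closest feasible solution is xfeas = repair(x) = [1.0, 0.2], hence
    # penalty = (1 * (1.5 - 1.0)**2 + 1 * (0.2 - 0.2)**2) / len(x) = 0.125.
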
    # ____________________________________________________________
    #
    def feasible_ratio(self, solutions):
        """counts for each coordinate the number of feasible values in
        ``solutions`` and returns an array of length ``len(solutions[0])``
        with the ratios.

        `solutions` is a list or array of repaired `Solution` instances

        """
        count = np.zeros(len(solutions[0]))
        for x in solutions:
            count += x.unrepaired == x
        return count / float(len(solutions))

    # ____________________________________________________________
    #
    def update(self, function_values, es):
        """updates the weights for computing a boundary penalty.

        Arguments
        ---------
        `function_values`
            all function values of recent population of solutions
        `es`
            `CMAEvolutionStrategy` object instance, in particular
            mean and variances and the methods from the attribute
            `gp` of type `GenoPheno` are used.

        """
        if self.bounds is None or (self.bounds[0] is None and
                                   self.bounds[1] is None):
            return self

        N = es.N
        # ## prepare
        # compute varis = sigma**2 * C_ii
        varis = es.sigma**2 * array(N * [es.C] if np.isscalar(es.C) else (  # scalar case
            es.C if np.isscalar(es.C[0]) else  # diagonal matrix case
            [es.C[i][i] for i in xrange(N)]))  # full matrix case

        # relative violation in geno-space
        dmean = (es.mean - es.gp.geno(self.repair(es.gp.pheno(es.mean)))) / varis**0.5

        # ## Store/update a history of delta fitness value
        fvals = sorted(function_values)
        l = 1 + len(fvals)
        val = fvals[3 * l // 4] - fvals[l // 4]  # interquartile range, exact apart from interpolation
        val = val / np.mean(varis)  # new: val is normalized with sigma of the same iteration
        # insert val in history
        if np.isfinite(val) and val > 0:
            self.hist.insert(0, val)
        elif val == inf and len(self.hist) > 1:
            self.hist.insert(0, max(self.hist))
        else:
            pass  # ignore 0 or nan values
        if len(self.hist) > 20 + (3 * N) / es.popsize:
            self.hist.pop()

        # ## prepare
        dfit = np.median(self.hist)  # median interquartile range
        damp = min(1, es.sp.mueff / 10. / N)

        # ## set/update weights
        # throw initialization error
        if len(self.hist) == 0:
            raise _Error('wrongful initialization, no feasible solution sampled. ' +
                         'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +
                         'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). ')
        # initialize weights
        if (dmean.any() and (not self.weights_initialized or es.countiter == 2)):  # TODO
            self.gamma = array(N * [2 * dfit])  ## BUGBUGzzzz: N should be phenotypic (bounds are in phenotype), but is genotypic
            self.weights_initialized = True
        # update weights gamma
        if self.weights_initialized:
            edist = array(abs(dmean) - 3 * max(1, N**0.5 / es.sp.mueff))
            if 1 < 3:  # this is better, around a factor of two
                # increase single weights possibly with a faster rate than they can decrease
                # value unit of edist is std dev, 3 == random walk of 9 steps
                self.gamma *= exp((edist > 0) * np.tanh(edist / 3) / 2.)**damp
                # decrease all weights up to the same level to avoid single extremely small weights
                # use a constant factor for pseudo-keeping invariance
                self.gamma[self.gamma > 5 * dfit] *= exp(-1. / 3)**damp
                # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)
        es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]
        # ## return penalty
        # es.more_to_write = self.gamma if not np.isscalar(self.gamma) else N*[1]
        return self  # bound penalty values

# ____________________________________________________________
# ____________________________________________________________
#
class BoxConstraintsTransformationBase(object):
    """Implements a transformation into boundaries and is used for
    boundary handling::

        tf = BoxConstraintsTransformationAnyDerivedClass([[1, 4]])
        x = [3, 2, 4.4]
        y = tf(x)  # "repaired" solution
        print(tf([2.5]))  # middle value is never changed
        [2.5]

    :See: ``BoundaryHandler``

    """
    def __init__(self, bounds):
        try:
            if len(bounds[0]) != 2:
                raise ValueError
        except (TypeError, IndexError, ValueError):
            raise ValueError('bounds must be either [[lb0, ub0]] or [[lb0, ub0], [lb1, ub1],...], \n where in both cases the last entry is reused for all remaining dimensions')
        self.bounds = bounds
        self.initialize()

    def initialize(self):
        """initialize in base class"""
        self._lb = [b[0] for b in self.bounds]  # can be done more efficiently?
        self._ub = [b[1] for b in self.bounds]

    def _lowerupperval(self, a, b, c):
        return np.max([np.max(a), np.min([np.min(b), c])])
    def bounds_i(self, i):
        """return ``[ith_lower_bound, ith_upper_bound]``"""
        return self.bounds[self._index(i)]
    def __call__(self, solution_in_genotype):
        res = [self._transform_i(x, i) for i, x in enumerate(solution_in_genotype)]
        return res
    transform = __call__
    def inverse(self, solution_in_phenotype, copy_if_changed=True, copy_always=True):
        return [self._inverse_i(y, i) for i, y in enumerate(solution_in_phenotype)]
    def _index(self, i):
        return min((i, len(self.bounds) - 1))
    def _transform_i(self, x, i):
        raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
    def _inverse_i(self, y, i):
        raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
    def shift_or_mirror_into_invertible_domain(self, solution_genotype):
        """return the reference solution that has the same ``box_constraints_transformation(solution)``
        value, i.e. ``tf.shift_or_mirror_into_invertible_domain(x) = tf.inverse(tf.transform(x))``.
        This is an idempotent mapping (leading to the same result independent how often it is
        repeatedly applied).

        """
        return self.inverse(self(solution_genotype))

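# Illustrative sketch of the base-class interface, exercised via the derived
# lin-quad transformation defined below (the base class itself is abstract):
#
# >>> tf = BoxConstraintsLinQuadTransformation([[1, 4]])
# >>> tf.bounds_i(5)      # the last (here only) bound pair is recycled
# [1, 4]
# >>> tf([2.5, 2.5])      # the middle of the interval is never changed
# [2.5, 2.5]
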
class _BoxConstraintsTransformationTemplate(BoxConstraintsTransformationBase):
    """copy/paste this template to implement a new boundary handling transformation"""
    def __init__(self, bounds):
        BoxConstraintsTransformationBase.__init__(self, bounds)
    def initialize(self):
        BoxConstraintsTransformationBase.initialize(self)  # likely to be removed
    def _transform_i(self, x, i):
        raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
    def _inverse_i(self, y, i):
        raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
    __doc__ = BoxConstraintsTransformationBase.__doc__ + __doc__

class BoxConstraintsLinQuadTransformation(BoxConstraintsTransformationBase):
    """implements a bijective, monotone transformation between [lb - al, ub + au]
    and [lb, ub], which is the identity (and therefore linear) in [lb + al, ub - au]
    (typically about 90% of the interval) and quadratic in [lb - 3*al, lb + al]
    and in [ub - au, ub + 3*au]. The transformation is periodically
    expanded beyond the limits (somewhat resembling the shape sin(x-pi/2))
    with a period of ``2 * (ub - lb + al + au)``.

    Details
    =======
    Partly due to numerical considerations, the values ``al`` and ``au`` depend
    on ``abs(lb)`` and ``abs(ub)``, which makes the transformation not
    translation invariant. In contrast to sin(.), the transformation is robust
    to "arbitrary" values for boundaries, e.g. a lower bound of ``-1e99`` or
    ``np.Inf`` or ``None``.

    Examples
    ========
    Example to use with cma:

    >>> import cma
    >>> # only the first variable has an upper bound
    >>> tf = cma.BoxConstraintsLinQuadTransformation([[1,2], [1,None]])  # second==last pair is re-cycled
    >>> cma.fmin(cma.felli, 9 * [2], 1, {'transformation': [tf.transform, tf.inverse], 'verb_disp': 0})
    >>> # ...or...
    >>> es = cma.CMAEvolutionStrategy(9 * [2], 1)
    >>> while not es.stop():
    ...     X = es.ask()
    ...     f = [cma.felli(tf(x)) for x in X]  # tf(x) == tf.transform(x)
    ...     es.tell(X, f)

    Example of the internal workings:

    >>> import cma
    >>> tf = cma.BoxConstraintsLinQuadTransformation([[1,2], [1,11], [1,11]])
    >>> tf.bounds
    [[1, 2], [1, 11], [1, 11]]
    >>> tf([1.5, 1.5, 1.5])
    [1.5, 1.5, 1.5]
    >>> tf([1.52, -2.2, -0.2, 2, 4, 10.4])
    [1.52, 4.0, 2.0, 2.0, 4.0, 10.4]
    >>> res = np.round(tf._au, 2)
    >>> assert list(res[:4]) == [ 0.15, 0.6, 0.6, 0.6]
    >>> res = [round(x, 2) for x in tf.shift_or_mirror_into_invertible_domain([1.52, -12.2, -0.2, 2, 4, 10.4])]
    >>> assert res == [1.52, 9.2, 2.0, 2.0, 4.0, 10.4]
    >>> tmp = tf([1])  # call with lower dimension

    """
    def __init__(self, bounds):
        """``x`` is defined in ``[lb - 3*al, ub + au + r - 2*al]`` with ``r = ub - lb + al + au``,
        and ``x == transformation(x)`` in ``[lb + al, ub - au]``.
        ``beta*x - alphal = beta*x - alphau`` is then defined in ``[lb, ub]``.

        ``alphal`` and ``alphau`` represent the same value, but respectively numerically
        better suited for values close to lb and ub.

        """
        # super().__init__(bounds) # only in Python 3.x available
        BoxConstraintsTransformationBase.__init__(self, bounds)
        # super(BB, self).__init__(bounds) # is supposed to call initialize
        # ## super(BoxConstraintsTransformationBase, self).__init__(bounds) # is probably invalid

    def initialize(self, length=None):
        """see ``__init__``"""
        if length is None:
            length = len(self.bounds)
        max_i = min((len(self.bounds) - 1, length - 1))
        self._lb = array([self.bounds[min((i, max_i))][0]
                          if self.bounds[min((i, max_i))][0] is not None else -np.Inf
                          for i in xrange(length)], copy=False)
        self._ub = array([self.bounds[min((i, max_i))][1]
                          if self.bounds[min((i, max_i))][1] is not None else np.Inf
                          for i in xrange(length)], copy=False)
        lb = self._lb
        ub = self._ub
        # define added values for lower and upper bound
        self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                          if np.isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
        self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                          if np.isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
    def __call__(self, solution_genotype, copy_if_changed=True, copy_always=False):
        # about four times faster version of array([self._transform_i(x, i) for i, x in enumerate(solution_genotype)])
        # still, this makes a typical run on a test function two times slower, but there might be one too many copies
        # during the transformations in gp
        if len(self._lb) != len(solution_genotype):
            self.initialize(len(solution_genotype))
        lb = self._lb
        ub = self._ub
        al = self._al
        au = self._au

        if copy_always or not isinstance(solution_genotype[0], float):
            # transformed value is likely to be a float
            y = np.array(solution_genotype, copy=True, dtype=float)
            # if solution_genotype is not a float, copy value is disregarded
            copy = False
        else:
            y = solution_genotype
            copy = copy_if_changed
        idx = (y < lb - 2 * al - (ub - lb) / 2.0) | (y > ub + 2 * au + (ub - lb) / 2.0)
        if idx.any():
            r = 2 * (ub[idx] - lb[idx] + al[idx] + au[idx])  # period
            s = lb[idx] - 2 * al[idx] - (ub[idx] - lb[idx]) / 2.0  # start
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] -= r * ((y[idx] - s) // r)  # shift
        idx = y > ub + au
        if idx.any():
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] -= 2 * (y[idx] - ub[idx] - au[idx])
        idx = y < lb - al
        if idx.any():
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] += 2 * (lb[idx] - al[idx] - y[idx])
        idx = y < lb + al
        if idx.any():
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] = lb[idx] + (y[idx] - (lb[idx] - al[idx]))**2 / 4 / al[idx]
        idx = y > ub - au
        if idx.any():
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] = ub[idx] - (y[idx] - (ub[idx] + au[idx]))**2 / 4 / au[idx]
        # assert Mh.vequals_approximately(y, BoxConstraintsTransformationBase.__call__(self, solution_genotype))
        return y
    __call__.__doc__ = BoxConstraintsTransformationBase.__doc__
    transform = __call__
    def idx_infeasible(self, solution_genotype):
        """return indices of "infeasible" variables, that is,
        variables that do not directly map into the feasible domain such that
        ``tf.inverse(tf(x)) == x``.

        """
        res = [i for i, x in enumerate(solution_genotype) if not self.is_feasible_i(x, i)]
        return res
    def is_feasible_i(self, x, i):
        """return True if value ``x`` is in the invertible domain of
        variable ``i``

        """
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        return lb - al < x < ub + au
    def is_loosely_feasible_i(self, x, i):
        """never used"""
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        return lb - 2 * al - (ub - lb) / 2.0 <= x <= ub + 2 * au + (ub - lb) / 2.0

    def shift_or_mirror_into_invertible_domain(self, solution_genotype, copy=False):
        """Details: input ``solution_genotype`` is changed. The domain is
        [lb - al, ub + au] and in [lb - 2*al - (ub - lb) / 2, lb - al]
        mirroring is applied.

        """
        assert solution_genotype is not None
        if copy:
            y = [val for val in solution_genotype]
        else:
            y = solution_genotype
        if isinstance(y, np.ndarray) and not isinstance(y[0], float):
            y = array(y, dtype=float)
        for i in rglen(y):
            lb = self._lb[self._index(i)]
            ub = self._ub[self._index(i)]
            al = self._al[self._index(i)]
            au = self._au[self._index(i)]
            # x is far from the boundary, compared to ub - lb
            if y[i] < lb - 2 * al - (ub - lb) / 2.0 or y[i] > ub + 2 * au + (ub - lb) / 2.0:
                r = 2 * (ub - lb + al + au)  # period
                s = lb - 2 * al - (ub - lb) / 2.0  # start
                y[i] -= r * ((y[i] - s) // r)  # shift
            if y[i] > ub + au:
                y[i] -= 2 * (y[i] - ub - au)
            if y[i] < lb - al:
                y[i] += 2 * (lb - al - y[i])
        return y
    shift_or_mirror_into_invertible_domain.__doc__ = BoxConstraintsTransformationBase.shift_or_mirror_into_invertible_domain.__doc__ + shift_or_mirror_into_invertible_domain.__doc__

    def _shift_or_mirror_into_invertible_i(self, x, i):
        """shift into the invertible domain [lb - al, ub + au], mirror close to
        boundaries in order to get a smooth transformation everywhere

        """
        assert x is not None
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        # x is far from the boundary, compared to ub - lb
        if x < lb - 2 * al - (ub - lb) / 2.0 or x > ub + 2 * au + (ub - lb) / 2.0:
            r = 2 * (ub - lb + al + au)  # period
            s = lb - 2 * al - (ub - lb) / 2.0  # start
            x -= r * ((x - s) // r)  # shift
        if x > ub + au:
            x -= 2 * (x - ub - au)
        if x < lb - al:
            x += 2 * (lb - al - x)
        return x
    def _transform_i(self, x, i):
        """return transform of x in component i"""
        x = self._shift_or_mirror_into_invertible_i(x, i)
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        if x < lb + al:
            return lb + (x - (lb - al))**2 / 4 / al
        elif x < ub - au:
            return x
        elif x < ub + 3 * au:
            return ub - (x - (ub + au))**2 / 4 / au
        else:
            assert False  # shift removes this case
            return ub + au - (x - (ub + au))
    def _inverse_i(self, y, i):
        """return inverse of y in component i"""
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        if 1 < 3:
            if not lb <= y <= ub:
                raise ValueError('argument of inverse must be within the given bounds')
        if y < lb + al:
            return (lb - al) + 2 * (al * (y - lb))**0.5
        elif y < ub - au:
            return y
        else:
            return (ub + au) - 2 * (au * (ub - y))**0.5

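# Consistency sketch for the component-wise mappings above, with a value in
# the invertible (quadratic) regime near the upper bound (made-up numbers):
#
# >>> tf = BoxConstraintsLinQuadTransformation([[0, 1]])
# >>> y = tf._transform_i(1.05, 0)     # maps into [0, 1], here y == 0.99375
# >>> abs(tf._inverse_i(y, 0) - 1.05) < 1e-11
# True
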
# ____________________________________________________________
# ____________________________________________________________
#
class GenoPheno(object):
    """Genotype-phenotype transformation.

    Method `pheno` provides the transformation from geno- to phenotype,
    that is from the internal representation to the representation used
    in the objective function. Method `geno` provides the "inverse" pheno-
    to genotype transformation. The geno-phenotype transformation comprises,
    in this order:

    - insert fixed variables (with the phenotypic and therefore quite
      possibly "wrong" values)
    - affine linear transformation (scaling and shift)
    - user-defined transformation
    - repair (e.g. into feasible domain due to boundaries)
    - assign fixed variables their original phenotypic value

    By default all transformations are the identity. The repair is only applied,
    if the transformation is given as argument to the method `pheno`.

    ``geno`` is only necessary, if solutions have been injected.

    """
    def __init__(self, dim, scaling=None, typical_x=None, fixed_values=None, tf=None):
        """return `GenoPheno` instance with fixed phenotypic dimension `dim`.

        Keyword Arguments
        -----------------
        `scaling`
            the diagonal of a scaling transformation matrix, multipliers
            in the genotype-phenotype transformation, see `typical_x`
        `typical_x`
            ``pheno = scaling*geno + typical_x``
        `fixed_values`
            a dictionary of variable indices and values, like ``{0:2.0, 2:1.1}``,
            that are not subject to change, negative indices are ignored
            (they act like commenting out the index), values are phenotypic
            values.
        `tf`
            list of two user-defined transformation functions, or `None`.

            ``tf[0]`` is a function that transforms the internal representation
            as used by the optimizer into a solution as used by the
            objective function. ``tf[1]`` does the back-transformation.
            For example ::

                tf_0 = lambda x: [xi**2 for xi in x]
                tf_1 = lambda x: [abs(xi)**0.5 for xi in x]

            or "equivalently" without the `lambda` construct ::

                def tf_0(x):
                    return [xi**2 for xi in x]
                def tf_1(x):
                    return [abs(xi)**0.5 for xi in x]

            ``tf=[tf_0, tf_1]`` is a reasonable way to guarantee that only positive
            values are used in the objective function.

        Details
        -------
        If ``tf_1`` is omitted, the initial x-value must be given as genotype (because
        the phenotype-genotype transformation is unknown in this case) and "injection" of
        solutions might lead to unexpected results.

        """
        self.N = dim
        self.fixed_values = fixed_values
        if tf is not None:
            self.tf_pheno = tf[0]
            self.tf_geno = tf[1]  # TODO: should not necessarily be needed
            # r = np.random.randn(dim)
            # assert all(tf[0](tf[1](r)) - r < 1e-7)
            # r = np.random.randn(dim)
            # assert all(tf[0](tf[1](r)) - r > -1e-7)
            print("WARNING in class GenoPheno: user defined transformations have not been tested thoroughly")
        else:
            self.tf_geno = None
            self.tf_pheno = None

        if fixed_values:
            if not isinstance(fixed_values, dict):
                raise _Error("fixed_values must be a dictionary {index:value,...}")
            if max(fixed_values.keys()) >= dim:
                raise _Error("max(fixed_values.keys()) = " + str(max(fixed_values.keys())) +
                             " >= dim=N=" + str(dim) + " is not a feasible index")
            # convenience commenting functionality: drop negative keys
            for k in list(fixed_values.keys()):
                if k < 0:
                    fixed_values.pop(k)

        def vec_is_default(vec, default_val=0):
            """return True if `vec` has the value `default_val`,
            None or [None] are also recognized as default

            """
            # TODO: rather let default_val be a list of default values, cave comparison of arrays
            try:
                if len(vec) == 1:
                    vec = vec[0]  # [None] becomes None and is always default
            except TypeError:
                pass  # vec is a scalar

            if vec is None or vec == default_val:
                return True
            try:
                if vec == array(None):
                    return True
            except NotImplementedError:
                pass
            return False

        self.scales = array(scaling) if scaling is not None else None
        if vec_is_default(self.scales, 1):
            self.scales = 1  # CAVE: 1 is not array(1)
        elif self.scales.shape != () and len(self.scales) != self.N:
            raise _Error('len(scales) == ' + str(len(self.scales)) +
                         ' does not match dimension N == ' + str(self.N))

        self.typical_x = array(typical_x) if typical_x is not None else None
        if vec_is_default(self.typical_x, 0):
            self.typical_x = 0
        elif self.typical_x.shape != () and len(self.typical_x) != self.N:
            raise _Error('len(typical_x) == ' + str(len(self.typical_x)) +
                         ' does not match dimension N == ' + str(self.N))

        if (self.scales is 1 and
                self.typical_x is 0 and
                self.fixed_values is None and
                self.tf_pheno is None):
            self.isidentity = True
        else:
            self.isidentity = False
    def pheno(self, x, into_bounds=(lambda x, copy=False: x if not copy else array(x, copy=copy)),
              copy=True, copy_always=False,
              archive=None, iteration=None):
        """maps the genotypic input argument into the phenotypic space, see
        help for class `GenoPheno`

        Details
        -------
        If ``copy``, values from ``x`` are copied if changed under the transformation.

        """
        # TODO: copy_always seems superfluous, as it could be done in the calling code
        if copy_always and not copy:
            raise ValueError('arguments copy_always=' + str(copy_always) +
                             ' and copy=' + str(copy) + ' have inconsistent values')
        if copy_always:
            x = array(x, copy=True)
            copy = False

        if self.isidentity:
            y = into_bounds(x)  # was into_bounds(x, False) before (bug before v0.96.22)
        else:
            if self.fixed_values is None:
                y = array(x, copy=copy)  # make a copy, in case
            else:  # expand with fixed values
                y = list(x)  # is a copy
                for i in sorted(self.fixed_values.keys()):
                    y.insert(i, self.fixed_values[i])
                y = array(y, copy=False)
            copy = False

            if self.scales is not 1:  # just for efficiency
                y *= self.scales

            if self.typical_x is not 0:
                y += self.typical_x

            if self.tf_pheno is not None:
                y = array(self.tf_pheno(y), copy=False)

            y = into_bounds(y, copy)  # copy is False

            if self.fixed_values is not None:
                for i, k in list(self.fixed_values.items()):
                    y[i] = k

        if archive is not None:
            archive.insert(y, geno=x, iteration=iteration)
        return y
    def geno(self, y, from_bounds=lambda x: x,
             copy_if_changed=True, copy_always=False,
             repair=None, archive=None):
        """maps the phenotypic input argument into the genotypic space,
        that is, computes essentially the inverse of ``pheno``.

        By default a copy is made only to prevent modifying ``y``.

        The inverse of the user-defined transformation (if any)
        is only needed if external solutions are injected, it is not
        applied to the initial solution x0.

        Details
        =======
        ``geno`` searches first in ``archive`` for the genotype of
        ``y`` and returns the found value, typically unrepaired.
        Otherwise, first ``from_bounds`` is applied, to revert a
        projection into the bound domain (if necessary), and ``pheno``
        is reverted. ``repair`` is applied last, and is usually the
        method ``CMAEvolutionStrategy.repair_genotype`` that limits the
        Mahalanobis norm of ``geno(y) - mean``.

        """
        if archive is not None:
            try:
                x = archive[y]['geno']
            except KeyError:
                x = None
            if x is not None:
                if archive[y]['iteration'] < archive.last_iteration and repair is not None:
                    x = repair(x, copy_if_changed=copy_always)
                return x
        x = y
        if copy_always:
            x = array(y, copy=True)
            copy = False
        else:
            copy = copy_if_changed

        x = from_bounds(x)  # TODO should also take copy?

        if self.isidentity:
            if repair is not None:
                x = repair(x, copy)
            return x

        if copy:  # could be improved?
            x = array(x, copy=True)
            copy = False

        # user-defined transformation
        if self.tf_geno is not None:
            x = array(self.tf_geno(x), copy=False)
        elif self.tf_pheno is not None:
            raise ValueError('t1 of options transformation was not defined but is needed as being the inverse of t0')

        # affine-linear transformation: shift and scaling
        if self.typical_x is not 0:
            x -= self.typical_x
        if self.scales is not 1:  # just for efficiency
            x /= self.scales

        # kick out fixed_values
        if self.fixed_values is not None:
            # keeping the transformed values does not help much
            # therefore it is omitted
            if 1 < 3:
                keys = sorted(self.fixed_values.keys())
                x = array([x[i] for i in range(len(x)) if i not in keys], copy=False)
        # repair injected solutions
        if repair is not None:
            x = repair(x, copy)
        return x

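# Illustrative sketch of the geno-pheno mapping with one fixed variable
# (made-up values; the genotype is shorter than the phenotype by the
# number of fixed variables):
#
# >>> gp = GenoPheno(3, fixed_values={0: 5.0})
# >>> list(gp.pheno([1., 2.]))      # genotype of dimension 3 - 1 = 2
# [5.0, 1.0, 2.0]
# >>> list(gp.geno([5.0, 1.0, 2.0]))
# [1.0, 2.0]
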
# ____________________________________________________________
# ____________________________________________________________
# check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty...
# see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes
#
class OOOptimizer(object):
    """"abstract" base class for an OO optimizer interface.

    Relevant methods are `__init__`, `ask`, `tell`, `stop`, `result`,
    and `optimize`. Only `optimize` is fully implemented in this base
    class.

    Examples
    --------
    All examples minimize the function `elli`, the output is not shown.
    (A preferred environment to execute all examples is ``ipython -pylab``.)
    First we need ::

        from cma import CMAEvolutionStrategy, CMADataLogger  # CMAEvolutionStrategy derives from the OOOptimizer class
        elli = lambda x: sum(1e3**((i-1.)/(len(x)-1.)*x[i])**2 for i in range(len(x)))

    The shortest example uses the inherited method `OOOptimizer.optimize()`::

        res = CMAEvolutionStrategy(8 * [0.1], 0.5).optimize(elli)

    The input parameters to `CMAEvolutionStrategy` are specific to this
    inherited class. The remaining functionality is based on the interface
    defined by `OOOptimizer`. We might have a look at the result::

        print(res[0])  # best solution and
        print(res[1])  # its function value

    `res` is the return value from method `CMAEvolutionStrategy.result()`
    appended with `CMAEvolutionStrategy.logger`.
    In order to display more exciting output we do ::

        res[-1].plot()  # if matplotlib is available

    Virtually the same example can be written with an explicit loop
    instead of using `optimize()`. This gives the necessary insight into
    the `OOOptimizer` class interface and entire control over the
    iteration loop::

        optim = CMAEvolutionStrategy(9 * [0.5], 0.3)  # a new CMAEvolutionStrategy instance calling CMAEvolutionStrategy.__init__()
        logger = CMADataLogger(optim)  # get a logger instance, we could also use the instance optim.logger

        # this loop resembles optimize()
        while not optim.stop():  # iterate
            X = optim.ask()  # get candidate solutions
            f = [elli(x) for x in X]  # evaluate solutions
            # maybe do something else that needs to be done
            optim.tell(X, f)  # do all the real work: prepare for next iteration
            optim.disp(20)  # display info every 20th iteration
            logger.add()  # log another "data line"

        # final output
        print('termination by', optim.stop())
        print('best f-value =', optim.result()[1])
        print('best solution =', optim.result()[0])
        logger.plot()  # if matplotlib is available
        raw_input('press enter to continue')  # prevents exiting and closing figures

    Details
    -------
    Most of the work is done in the method `tell(...)`. The method `result()` returns
    more useful output.

    """
    def __init__(self, xstart, **more_args):
        """``xstart`` is a mandatory argument"""
        self.xstart = xstart
        self.more_args = more_args
        self.initialize()
    def initialize(self):
        """(re-)set to the initial state"""
        self.countiter = 0
        self.xcurrent = self.xstart[:]
        raise NotImplementedError('method initialize() must be implemented in derived class')
    def ask(self):
        """abstract method, AKA "get" or "sample_distribution", deliver
        new candidate solution(s), a list of "vectors"
        """
        raise NotImplementedError('method ask() must be implemented in derived class')
    def tell(self, solutions, function_values):
        """abstract method, AKA "update", prepare for next iteration"""
        self.countiter += 1
        raise NotImplementedError('method tell() must be implemented in derived class')
    def stop(self):
        """abstract method, return satisfied termination conditions in
        a dictionary like ``{'termination reason': value, ...}``,
        for example ``{'tolfun': 1e-12}``, or the empty dictionary ``{}``.
        The implementation of `stop()` should prevent an infinite
        loop.
        """
        raise NotImplementedError('method stop() is not implemented')
    def disp(self, modulo=None):
        """abstract method, display some iteration infos if
        ``self.iteration_counter % modulo == 0`` """
        pass  # raise NotImplementedError('method disp() is not implemented')
    def result(self):
        """abstract method, return ``(x, f(x), ...)``, that is, the
        minimizer, its function value, ..."""
        raise NotImplementedError('method result() is not implemented')

1777
1778 # previous ordering:
1779 # def optimize(self, objectivefct,
1780 # logger=None, verb_disp=20,
1781 # iterations=None, min_iterations=1,
1782 # call_back=None):
1783 - def optimize(self, objective_fct,
1784 iterations=None, min_iterations=1, args=(),
1785 verb_disp=None, logger=None,
1786 call_back=None):
1787 """find minimizer of `objective_fct`.
1788
1789 CAVEAT: the return value for `optimize` is versatile and might
1790 change in near future to ``self``.
1791
1792 Arguments
1793 ---------
1794
1795 `objective_fct`
1796 function be to minimized
1797 `iterations`
1798 number of (maximal) iterations, while ``not self.stop()``
1799 `min_iterations`
1800 minimal number of iterations, even if ``not self.stop()``
1801 `args`
1802 arguments passed to `objective_fct`
1803 `verb_disp`
1804 print to screen every `verb_disp` iteration, if ``None``
1805 the value from ``self.logger`` is "inherited", if
1806 available.
1807 ``logger``
1808 a `BaseDataLogger` instance, which must be compatible
1809 with the type of ``self``.
1810 ``call_back``
1811 call back function called like ``call_back(self)`` or
1812 a list of call back functions.
1813
1814 ``return self.result() + (self.stop(), self, logger)`` which
1815 might change in near future.
1816
1817 Example
1818 -------
1819 >>> import cma
1820 >>> res = cma.CMAEvolutionStrategy(7 * [0.1], 0.5).optimize(cma.fcts.rosen, verb_disp=100)
1821 (4_w,9)-CMA-ES (mu_w=2.8,w_1=49%) in dimension 7 (seed=630721393)
1822 Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1823 1 9 3.163954777181882e+01 1.0e+00 4.12e-01 4e-01 4e-01 0:0.0
1824 2 18 3.299006223906629e+01 1.0e+00 3.60e-01 3e-01 4e-01 0:0.0
1825 3 27 1.389129389866704e+01 1.1e+00 3.18e-01 3e-01 3e-01 0:0.0
1826 100 900 2.494847340045985e+00 8.6e+00 5.03e-02 2e-02 5e-02 0:0.3
1827 200 1800 3.428234862999135e-01 1.7e+01 3.77e-02 6e-03 3e-02 0:0.5
1828 300 2700 3.216640032470860e-04 5.6e+01 6.62e-03 4e-04 9e-03 0:0.8
1829 400 3600 6.155215286199821e-12 6.6e+01 7.44e-06 1e-07 4e-06 0:1.1
1830 438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
1831 438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
1832 ('termination by', {'tolfun': 1e-11})
1833 ('best f-value =', 1.1189867885201275e-14)
1834 ('solution =', array([ 1. , 1. , 1. , 0.99999999, 0.99999998,
1835 0.99999996, 0.99999992]))
1836 >>> print(res[0])
1837 [ 1. 1. 1. 0.99999999 0.99999998 0.99999996
1838 0.99999992]
1839
1840 """
1841 assert iterations is None or min_iterations <= iterations
1842 if not hasattr(self, 'logger'):
1843 self.logger = logger
1844 if logger is None:
1845 logger = self.logger
1846 self.logger_from_optimize_method_call = logger
1847 if not isinstance(call_back, list):
1848 call_back = [call_back]
1849
1850 citer = 0
1851 while not self.stop() or citer < min_iterations:
1852 if iterations is not None and citer >= iterations:
1853 return self.result()
1854 citer += 1
1855
1856 X = self.ask() # deliver candidate solutions
1857 fitvals = [objective_fct(x, *args) for x in X]
1858 self.tell(X, fitvals) # all the work is done here
1859 self.disp(verb_disp)
1860 for f in call_back:
1861 if f is not None:
1862 f(self)
1863 logger.add(self) if logger else None
1864
1865 # signal logger that we left the loop
1866 # TODO: this is very ugly, because it assumes modulo keyword
1867 # argument *and* modulo attribute to be available
1868 try:
1869 logger.add(self, modulo=bool(logger.modulo)) if logger else None
1870 except TypeError:
1871 print(' suppressing the final call of the logger in OOOptimizer.optimize (modulo keyword parameter not available)')
1872 except AttributeError:
1873 print(' suppressing the final call of the logger in OOOptimizer.optimize (modulo attribute not available)')
1874 if verb_disp:
1875 self.disp(1)
1876 if verb_disp in (1, True):
1877 print('termination by', self.stop())
1878 print('best f-value =', self.result()[1])
1879 print('solution =', self.result()[0])
1880
1881 return self.result() + (self.stop(), self, logger)
1882
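# Illustrative sketch (not part of the original module): a minimal
# `OOOptimizer` subclass, a pure random search, showing the ask-and-tell
# contract that `optimize` relies on; budget and step size are made up.
#
# class PureRandomSearch(OOOptimizer):
#     def initialize(self):
#         self.countiter = 0
#         self.best = BestSolution(x=self.xstart[:])
#     def ask(self):
#         # one candidate per iteration, sampled around the incumbent
#         return [[xi + 0.1 * (np.random.rand() - 0.5) for xi in self.best.x]]
#     def tell(self, solutions, function_values):
#         self.countiter += 1
#         self.best.update(solutions, arf=function_values, evals=self.countiter)
#     def stop(self):
#         return {'maxiter': 100} if self.countiter >= 100 else {}
#     def result(self):
#         return self.best.get()
#
# >>> res = PureRandomSearch(4 * [1.]).optimize(lambda x: sum(xi**2 for xi in x))
# >>> assert res[1] < 4.5    # loose bound: the best f-value only improves
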
class CMAAdaptSigmaBase(object):
    """step-size adaptation base class, implementing hsig functionality
    via an isotropic evolution path.

    """
    def __init__(self, *args, **kwargs):
        self.is_initialized_base = False
        self._ps_updated_iteration = -1
    def initialize_base(self, es):
        """set parameters and state variable based on dimension,
        mueff and possibly further options.

        """
        self.cs = (es.sp.mueff + 2) / (es.N + es.sp.mueff + 3)
        self.ps = np.zeros(es.N)
        self.is_initialized_base = True
    def _update_ps(self, es):
        """update the isotropic evolution path

        :type es: CMAEvolutionStrategy
        """
        if not self.is_initialized_base:
            self.initialize_base(es)
        if self._ps_updated_iteration == es.countiter:
            return
        if es.countiter <= es.itereigenupdated:
            # es.B and es.D must/should be those from the last iteration
            assert es.countiter >= es.itereigenupdated
            _print_warning('distribution transformation (B and D) have been updated before ps could be computed',
                           '_update_ps', 'CMAAdaptSigmaBase')
        z = dot(es.B, (1. / es.D) * dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec))
        z *= es.sp.mueff**0.5 / es.sigma / es.sp.cmean
        # self.cs or es.sp.cs could be used here
        self.ps = (1 - self.cs) * self.ps + sqrt(self.cs * (2 - self.cs)) * z
        self._ps_updated_iteration = es.countiter
    def hsig(self, es):
        """return "OK-signal" for rank-one update, `True` (OK) or `False`
        (stall rank-one update), based on the length of an evolution path

        """
        self._update_ps(es)
        if self.ps is None:
            return True
        squared_sum = sum(self.ps**2) / (1 - (1 - self.cs)**(2 * es.countiter))
        # correction with self.countiter seems not necessary,
        # as pc also starts with zero
        return squared_sum / es.N - 1 < 1 + 4. / (es.N + 1)
    def update(self, es, **kwargs):
        """update ``es.sigma``"""
        self._update_ps(es)
        raise NotImplementedError('must be implemented in a derived class')
class CMAAdaptSigmaNone(CMAAdaptSigmaBase):
    def update(self, es, **kwargs):
        """no update, ``es.sigma`` remains constant.

        :param es: ``CMAEvolutionStrategy`` class instance
        :param kwargs: whatever else is needed to update ``es.sigma``

        """
        pass
class CMAAdaptSigmaDistanceProportional(CMAAdaptSigmaBase):
    """artificial setting of ``sigma`` for test purposes, e.g.
    to simulate optimal progress rates.

    """
    def __init__(self, coefficient=1.2):
        self.coefficient = coefficient
    def update(self, es, **kwargs):
        # step-size is set proportional to the distance to the optimum (assumed at zero)
        es.sigma = self.coefficient * es.sp.mueff * sum(es.mean**2)**0.5 / es.N
class CMAAdaptSigmaCSA(CMAAdaptSigmaBase):
    def __init__(self):
        """postpone initialization to a method call where dimension and mueff should be known.

        """
        self.is_initialized = False
    def initialize(self, es):
        """set parameters and state variable based on dimension,
        mueff and possibly further options.

        """
        self.disregard_length_setting = True if es.opts['CSA_disregard_length'] else False
        if es.opts['CSA_clip_length_value'] is not None:
            try:
                if len(es.opts['CSA_clip_length_value']) == 0:
                    es.opts['CSA_clip_length_value'] = [-np.Inf, np.Inf]
                elif len(es.opts['CSA_clip_length_value']) == 1:
                    es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value'][0]]
                elif len(es.opts['CSA_clip_length_value']) == 2:
                    es.opts['CSA_clip_length_value'] = np.sort(es.opts['CSA_clip_length_value'])
                else:
                    raise ValueError('option CSA_clip_length_value should be a number or a sequence of length 1 or 2')
            except TypeError:  # len(...) failed
                es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value']]
            es.opts['CSA_clip_length_value'] = list(np.sort(es.opts['CSA_clip_length_value']))
            if es.opts['CSA_clip_length_value'][0] > 0 or es.opts['CSA_clip_length_value'][1] < 0:
                raise ValueError('option CSA_clip_length_value must be a single positive or a negative and a positive number')
        self.cs = (es.sp.mueff + 2) / (es.N + es.sp.mueff + 3)
        self.damps = es.opts['CSA_dampfac'] * (0.5 +
                                               0.5 * min([1, (es.sp.lam_mirr / (0.159 * es.sp.popsize) - 1)**2])**1 +
                                               2 * max([0, ((es.sp.mueff - 1) / (es.N + 1))**es.opts['CSA_damp_mueff_exponent'] - 1]) +
                                               self.cs
                                               )
        self.max_delta_log_sigma = 1  # in symmetric use (strict lower bound is -cs/damps anyway)

        if self.disregard_length_setting:
            es.opts['CSA_clip_length_value'] = [0, 0]
            self.cs = (es.sp.mueff + 1)**0.5 / (es.N**0.5 + 2 * es.sp.mueff**0.5)
            self.damps = es.opts['CSA_dampfac'] * 1  # * (1.1 - 1/(es.N+1)**0.5)
        if es.opts['verbose'] > 1 or self.disregard_length_setting or 11 < 3:
            print('SigmaCSA Parameters')
            for k, v in self.__dict__.items():
                print('  ', k, ':', v)
        self.ps = np.zeros(es.N)
        self._ps_updated_iteration = -1
        self.is_initialized = True

    def _update_ps(self, es):
        if not self.is_initialized:
            self.initialize(es)
        if self._ps_updated_iteration == es.countiter:
            return
        z = dot(es.B, (1. / es.D) * dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec))
        z *= es.sp.mueff**0.5 / es.sigma / es.sp.cmean
        if es.opts['CSA_clip_length_value'] is not None:
            vals = es.opts['CSA_clip_length_value']
            min_len = es.N**0.5 + vals[0] * es.N / (es.N + 2)
            max_len = es.N**0.5 + vals[1] * es.N / (es.N + 2)
            act_len = sum(z**2)**0.5
            new_len = Mh.minmax(act_len, min_len, max_len)
            if new_len != act_len:
                z *= new_len / act_len
                # z *= (es.N / sum(z**2))**0.5  # ==> sum(z**2) == es.N
                # z *= es.const.chiN / sum(z**2)**0.5
        self.ps = (1 - self.cs) * self.ps + sqrt(self.cs * (2 - self.cs)) * z
        self._ps_updated_iteration = es.countiter
    def update(self, es, **kwargs):
        self._update_ps(es)  # caveat: if es.B or es.D are already updated and ps is not, this goes wrong!
        if es.opts['CSA_squared']:
            s = (sum(self.ps**2) / es.N - 1) / 2
            # sum(self.ps**2) / es.N has mean 1 and std sqrt(2/N) and is skewed
            # divided by 2 to have the derivative d/dx (x**2 / N - 1) for x**2=N equal to 1
        else:
            s = sum(self.ps**2)**0.5 / es.const.chiN - 1
        s *= self.cs / self.damps
        s_clipped = Mh.minmax(s, -self.max_delta_log_sigma, self.max_delta_log_sigma)
        es.sigma *= np.exp(s_clipped)
        # "error" handling
        if s_clipped != s:
            _print_warning('sigma change exp(' + str(s) + ') = ' + str(np.exp(s)) +
                           ' clipped to exp(+-' + str(self.max_delta_log_sigma) + ')',
                           'update',
                           'CMAAdaptSigmaCSA',
                           es.countiter, es.opts['verbose'])
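# In brief, the CSA rule implemented above (default, non-squared branch):
#   s = (||ps|| / chiN - 1) * cs / damps
#   sigma <- sigma * exp(clip(s, +- max_delta_log_sigma)),
# that is, sigma increases when the evolution path ps is longer than the
# expected length chiN ~ E||N(0,I)|| of a purely random walk, and decreases
# when it is shorter.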
class CMAAdaptSigmaMedianImprovement(CMAAdaptSigmaBase):
    """Compares median fitness against a fitness percentile of the previous iteration,
    see Ait ElHara et al, GECCO 2013.

    """
    def __init__(self):
        CMAAdaptSigmaBase.__init__(self)
    def initialize(self, es):
        r = es.sp.mueff / es.popsize
        self.index_to_compare = 0.5 * (r**0.5 + 2.0 * (1 - r**0.5) / log(es.N + 9)**2) * (es.popsize)  # TODO
        self.index_to_compare = (0.30 if not es.opts['vv'] else es.opts['vv']) * es.popsize  # TODO
        self.damp = 2 - 2 / es.N  # sign-rule: 2
        self.c = 0.3  # sign-rule needs <= 0.3
        self.s = 0  # averaged statistics, usually between -1 and +1
    def update(self, es, **kwargs):
        if es.countiter < 2:
            self.initialize(es)
            self.fit = es.fit.fit
        else:
            ft1, ft2 = self.fit[int(self.index_to_compare)], self.fit[int(np.ceil(self.index_to_compare))]
            ftt1, ftt2 = es.fit.fit[(es.popsize - 1) // 2], es.fit.fit[int(np.ceil((es.popsize - 1) / 2))]
            pt2 = self.index_to_compare - int(self.index_to_compare)
            # ptt2 = (es.popsize - 1) / 2 - (es.popsize - 1) // 2  # not in use
            s = 0
            if 1 < 3:
                s += (1 - pt2) * sum(es.fit.fit <= self.fit[int(np.ceil(self.index_to_compare))])
                s += pt2 * sum(es.fit.fit < self.fit[int(self.index_to_compare)])
                s -= (es.popsize + 1) / 2
                s *= 2 / es.popsize  # the range was popsize, is 2
            self.s = (1 - self.c) * self.s + self.c * s
            es.sigma *= exp(self.s / self.damp)
            # es.more_to_write.append(10**(self.s))

            # es.more_to_write.append(10**((2 / es.popsize) * (sum(es.fit.fit < self.fit[int(self.index_to_compare)]) - (es.popsize + 1) / 2)))
            # es.more_to_write.append(10**(self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2])))
            # es.more_to_write.append(10**(np.sign(self.fit[int(self.index_to_compare)] - es.fit.fit[es.popsize // 2])))
            self.fit = es.fit.fit
2075 -class CMAAdaptSigmaTPA(CMAAdaptSigmaBase):
2076 """two point adaptation for step-size sigma. Relies on a specific
2077 sampling of the first two offspring, whose objective function
2078 value ranks are used to decide on the step-size change.
2079
2080 Example
2081 =======
2082
2083 >>> import cma
2084 >>> cma.CMAOptions('adapt').pprint()
2085 >>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.1, {'AdaptSigma': cma.CMAAdaptSigmaTPA, 'ftarget': 1e-8})
2086 >>> es.optimize(cma.fcts.rosen)
2087 >>> assert 'ftarget' in es.stop()
2088 >>> assert es.result()[1] <= 1e-8
2089 >>> assert es.result()[2] < 6500 # typically < 5500
2090
2091 References: loosely based on Hansen 2008, CMA-ES with Two-Point
2092 Step-Size Adaptation, more tightly based on an upcoming paper by
2093 Hansen et al.
2094
2095 """
    def __init__(self, dimension=None, opts=None):
        CMAAdaptSigmaBase.__init__(self)  # base class provides method hsig()
        self.initialized = False
    def initialize(self, N):
        self.sp = _BlancClass()
        self.sp.damp = eval('N**0.5')  # why do we need 10 <-> exp(1/10) == 1.1? 2 should be fine!?
        self.sp.dampup = 1.0 * self.sp.damp  # 0.5 fails to converge on the Rastrigin function
        self.sp.dampdown = 1.0 * self.sp.damp
        self.sp.c = 1.0  # the rank difference is asymmetric, therefore the switch from increase to decrease takes too long otherwise
        self.sp.z_exponent = 0.5  # sign(z) * abs(z)**z_exponent, 0.5 seems better with larger popsize
        self.sp.sigma_fac = 1.0  # (obsolete) 0.5 feels better, but there is no evidence whether it is
        self.sp.relative_to_delta_mean = True  # (obsolete)
        self.s = 0  # the state variable
        self.last = None
        self.initialized = True
        return self
    def update(self, es, function_values, **kwargs):
        """the first and second value in ``function_values``
        must reflect two mirrored solutions, sampled
        in direction / in opposite direction of
        the previous mean shift, respectively.

        """
        # TODO: on the linear function, the two mirrored samples lead
        # to a sharp increase of the condition of the covariance matrix.
        # They should not be used to update the covariance matrix
        # if the step-size increases quickly.
        if not self.initialized:
            self.initialize(es.N)
        if 1 < 3:
            # use the ranking difference of the mirrors for adaptation
            # damp = 5 should be fine
            z = np.where(es.fit.idx == 1)[0][0] - np.where(es.fit.idx == 0)[0][0]
            z /= es.popsize - 1
            self.s = (1 - self.sp.c) * self.s + self.sp.c * np.sign(z) * np.abs(z)**self.sp.z_exponent
            if self.s > 0:
                es.sigma *= exp(self.s / self.sp.dampup)
            else:
                es.sigma *= exp(self.s / self.sp.dampdown)
            # es.more_to_write.append(10**z)


# ____________________________________________________________
# ____________________________________________________________
#
class CMAEvolutionStrategy(OOOptimizer):
    """CMA-ES stochastic optimizer class with ask-and-tell interface.

    Calling Sequences
    =================

    ``es = CMAEvolutionStrategy(x0, sigma0)``

    ``es = CMAEvolutionStrategy(x0, sigma0, opts)``

    ``res = CMAEvolutionStrategy(x0, sigma0).optimize(objective_fct)``

    CAVEAT: the return value of `optimize` might become ``optim`` in the
    near future.

    Arguments
    =========
    `x0`
        initial solution, starting point. `x0` is given as "genotype",
        which means that if::

            opts = {'transformation': [transform, inverse]}

        is given (``inverse`` can be ``None``), then ``transform(x0)``
        is the "phenotypic" initial solution and
        ``objective_function(transform(x0))`` is the objective
        function value of ``x0``.

    `sigma0`
        initial standard deviation. The problem variables should
        have been scaled such that a single standard deviation
        on all variables is useful and the optimum is expected to
        lie within about `x0` +- ``3*sigma0``. See also option
        `scaling_of_variables`. Often one wants to check for
        solutions close to the initial point. This allows,
        for example, for an easier check of consistency of the
        objective function and its interfacing with the optimizer.
        In this case, a much smaller `sigma0` is advisable.
    `opts`
        options, a dictionary with optional settings,
        see class `CMAOptions`.

    Main interface / usage
    ======================
    The interface is inherited from the generic `OOOptimizer`
    class (see also there). An object instance is generated from::

        es = cma.CMAEvolutionStrategy(8 * [0.5], 0.2)

    The least verbose interface is via the optimize method::

        es.optimize(objective_func)
        res = es.result()

    More verbosely, the optimization is done using the
    methods ``stop``, ``ask``, and ``tell`` ::

        while not es.stop():
            solutions = es.ask()
            es.tell(solutions, [cma.fcts.rosen(s) for s in solutions])


    where ``ask`` delivers new candidate solutions and ``tell`` updates
    the ``optim`` instance by passing the respective function values
    (the objective function ``cma.fcts.rosen`` can be replaced by any
    properly defined objective function, see ``cma.fcts`` for more
    examples).

    The class `CMAEvolutionStrategy` also provides::

        (solutions, func_values) = es.ask_and_eval(objective_func)

    and an entire optimization can also be written like::

        while not es.stop():
            es.tell(*es.ask_and_eval(objective_func))

    Besides termination criteria, only the ranks of the
    `func_values` are relevant in CMA-ES.

    Attributes and Properties
    =========================
    - `inputargs` -- passed input arguments
    - `inopts` -- passed options
    - `opts` -- actually used options, some of them can be changed at any
      time, see class `CMAOptions`
    - `popsize` -- population size lambda, number of candidate solutions
      returned by `ask()`
    - `logger` -- a `CMADataLogger` instance utilized by `optimize`

    Examples
    ========
    Super-short example, with output shown:

    >>> import cma
    >>> # construct an object instance in 4-D, sigma0=1:
    >>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234})
    (4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234)
    >>>
    >>> # optimize the ellipsoid function
    >>> es.optimize(cma.fcts.elli, verb_disp=1)
    Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
        1       8 2.093015112685775e+04 1.0e+00 9.27e-01  9e-01  9e-01 0:0.0
        2      16 4.964814235917688e+04 1.1e+00 9.54e-01  9e-01  1e+00 0:0.0
        3      24 2.876682459926845e+05 1.2e+00 1.02e+00  9e-01  1e+00 0:0.0
      100     800 6.809045875281943e-01 1.3e+02 1.41e-02  1e-04  1e-02 0:0.2
      200    1600 2.473662150861846e-10 8.0e+02 3.08e-05  1e-08  8e-06 0:0.5
      233    1864 2.766344961865341e-14 8.6e+02 7.99e-07  8e-11  7e-08 0:0.6
    >>>
    >>> cma.pprint(es.result())
    (array([ -1.98546755e-09,  -1.10214235e-09,   6.43822409e-11,
            -1.68621326e-11]),
     4.5119610261406537e-16,
     1666,
     1672,
     209,
     array([ -9.13545269e-09,  -1.45520541e-09,  -6.47755631e-11,
            -1.00643523e-11]),
     array([  3.20258681e-08,   3.15614974e-09,   2.75282215e-10,
              3.27482983e-11]))
    >>> assert es.result()[1] < 1e-9
    >>> help(es.result)
    Help on method result in module cma:

    result(self) method of cma.CMAEvolutionStrategy instance
        return ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds)``


    The optimization loop can also be written explicitly.

    >>> import cma
    >>> es = cma.CMAEvolutionStrategy(4 * [1], 1)
    >>> while not es.stop():
    ...     X = es.ask()
    ...     es.tell(X, [cma.fcts.elli(x) for x in X])
    ...     es.disp()
    <output omitted>

    achieving the same result as above.

    An example with lower bounds (at zero) and handling infeasible
    solutions:

    >>> import cma
    >>> import numpy as np
    >>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.5, {'bounds': [0, np.inf]})
    >>> while not es.stop():
    ...     fit, X = [], []
    ...     while len(X) < es.popsize:
    ...         curr_fit = None
    ...         while curr_fit in (None, np.NaN):
    ...             x = es.ask(1)[0]
    ...             curr_fit = cma.fcts.somenan(x, cma.fcts.elli)  # might return np.NaN
    ...         X.append(x)
    ...         fit.append(curr_fit)
    ...     es.tell(X, fit)
    ...     es.logger.add()
    ...     es.disp()
    <output omitted>
    >>>
    >>> assert es.result()[1] < 1e-9
    >>> assert es.result()[2] < 9000  # by internal termination
    >>> # es.logger.plot()  # will plot data
    >>> # cma.show()  # display plot window

    An example with a user-defined transformation, in this case to realize
    a lower bound of 2:

    >>> es = cma.CMAEvolutionStrategy(5 * [3], 1,
    ...                 {"transformation": [lambda x: x**2 + 2, None]})
    >>> es.optimize(cma.fcts.rosen)
    <output omitted>
    >>> assert cma.fcts.rosen(es.result()[0]) < 1e-6 + 5.530760944396627e+02
    >>> assert es.result()[2] < 3300

    The inverse transformation is (only) necessary if the `BoundPenalty`
    boundary handler is used at the same time.
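
    For the transformation above, a matching inverse could look as follows
    (an illustrative sketch; it assumes phenotypic values ``y >= 2``)::

        {"transformation": [lambda x: x**2 + 2,
                            lambda y: (y - 2)**0.5]}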

    The ``CMAEvolutionStrategy`` class also provides a default logger
    (cave: files are overwritten when the logger is used with the same
    filename prefix):

    >>> import cma
    >>> es = cma.CMAEvolutionStrategy(4 * [0.2], 0.5, {'verb_disp': 0})
    >>> es.logger.disp_header()  # to understand the print of disp
    Iterat Nfevals  function value    axis ratio maxstd   minstd
    >>> while not es.stop():
    ...     X = es.ask()
    ...     es.tell(X, [cma.fcts.sphere(x) for x in X])
    ...     es.logger.add()  # log current iteration
    ...     es.logger.disp([-1])  # display info for last iteration
    1      8 2.72769793021748e+03 1.0e+00 4.05e-01 3.99e-01
    2     16 6.58755537926063e+03 1.1e+00 4.00e-01 3.39e-01
    <output omitted>
    193   1544 3.15195320957214e-15 1.2e+03 3.70e-08 3.45e-11
    >>> es.logger.disp_header()
    Iterat Nfevals  function value    axis ratio maxstd   minstd
    >>> # es.logger.plot()  # will make a plot

    Example implementing restarts with increasing popsize (IPOP), output
    is not displayed:

    >>> import cma, numpy as np
    >>>
    >>> # restart with increasing population size (IPOP)
    >>> bestever = cma.BestSolution()
    >>> for lam in 10 * 2**np.arange(8):  # 10, 20, 40, 80, ..., 10 * 2**7
    ...     es = cma.CMAEvolutionStrategy('6 - 8 * np.random.rand(9)',  # 9-D
    ...                                   5,  # initial std sigma0
    ...                                   {'popsize': lam,  # options
    ...                                    'verb_append': bestever.evalsall})
    ...     logger = cma.CMADataLogger().register(es, append=bestever.evalsall)
    ...     while not es.stop():
    ...         X = es.ask()  # get list of new solutions
    ...         fit = [cma.fcts.rastrigin(x) for x in X]  # evaluate each solution
    ...         es.tell(X, fit)  # besides termination, only the ranking in fit is used
    ...
    ...         # display some output
    ...         logger.add()  # add a "data point" to the log, writing in files
    ...         es.disp()  # uses option verb_disp with default 100
    ...
    ...     print('termination:', es.stop())
    ...     cma.pprint(es.best.__dict__)
    ...
    ...     bestever.update(es.best)
    ...
    ...     # show a plot
    ...     # logger.plot();
    ...     if bestever.f < 1e-8:  # global optimum was hit
    ...         break
    <output omitted>
    >>> assert es.result()[1] < 1e-8

    On the Rastrigin function, the global optimum is usually located
    after about five restarts.

    Using the ``multiprocessing`` module, we can evaluate the function in
    parallel with a simple modification of the example (however,
    multiprocessing does not always seem reliable)::

        try:
            import multiprocessing as mp
            import cma
            es = cma.CMAEvolutionStrategy(22 * [0.0], 1.0, {'maxiter':10})
            pool = mp.Pool(es.popsize)
            while not es.stop():
                X = es.ask()
                es.tell(X, pool.map_async(cma.felli, X).get())  # use chunksize parameter as popsize/len(pool)?
                es.logger.add()
        except ImportError:
            pass

    The final example shows how to resume:

    >>> import cma, pickle
    >>>
    >>> es = cma.CMAEvolutionStrategy(12 * [0.1],  # a new instance, 12-D
    ...                               0.5)  # initial std sigma0
    >>> es.optimize(cma.fcts.rosen, iterations=100)
    >>> pickle.dump(es, open('saved-cma-object.pkl', 'wb'))
    >>> print('saved')
    >>> del es  # let's start fresh
    >>>
    >>> es = pickle.load(open('saved-cma-object.pkl', 'rb'))
    >>> print('resumed')
    >>> es.optimize(cma.fcts.rosen, verb_disp=200)
    >>> assert es.result()[2] < 15000
    >>> cma.pprint(es.result())

    Details
    =======
    The following two enhancements are implemented; the latter is turned
    on by default only for very small population sizes.

    *Active CMA* is implemented with option ``CMA_active`` and
    conducts an update of the covariance matrix with negative weights.
    The negative update is implemented such that positive definiteness
    is guaranteed. The update is applied after the default update and
    only before the covariance matrix is decomposed, which limits the
    additional computational burden to at most a factor of three
    (typically smaller). A typical speed-up factor (in number of
    f-evaluations) is between 1.1 and two.

    References: Jastrebski and Arnold, CEC 2006; Glasmachers et al., GECCO 2010.

    *Selective mirroring* is implemented with option ``CMA_mirrors``
    in the method ``get_mirror()``. Only the method `ask_and_eval()`
    (used by `fmin`) will then sample selectively mirrored vectors. In
    selective mirroring, only the worst solutions are mirrored. With
    the default small number of mirrors, *pairwise selection* (where at
    most one of the two mirrors contributes to the update of the
    distribution mean) is implicitly guaranteed under selective
    mirroring and therefore not explicitly implemented.

    References: Brockhoff et al., PPSN 2010; Auger et al., GECCO 2011.
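
    Both enhancements are controlled via options; the values below are
    illustrative placeholders rather than recommended settings
    (``CMA_mirrors`` is interpreted as a fraction of the population,
    cf. the check ``CMA_mirrors * popsize < 0.5`` in `tell()`)::

        es = cma.CMAEvolutionStrategy(10 * [1], 1,
                  {'CMA_active': True, 'CMA_mirrors': 0.1})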

    :See: `fmin()`, `CMAOptions`, `plot()`, `ask()`, `tell()`, `ask_and_eval()`

    """
    # ____________________________________________________________
    @property  # read-only attribute decorator for a method
    def popsize(self):
        """number of samples returned by `ask()` by default
        """
        return self.sp.popsize

    # this is not compatible with python2.5:
    #     @popsize.setter
    #     def popsize(self, p):
    #         """popsize cannot be set (this might change in future)
    #         """
    #         raise _Error("popsize cannot be changed (this might change in future)")

    # ____________________________________________________________
    # ____________________________________________________________
    def stop(self, check=True):
        """return a dictionary with the termination status.
        With ``check == False``, the termination conditions are not checked
        and the status might not reflect the current situation.
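
        A termination callback can be supplied via the
        ``termination_callback`` option; a minimal sketch (assuming the
        callback is called with the `es` instance, as in the code below)::

            es.opts['termination_callback'] = lambda es: es.countiter >= 100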

        """
        if (check and self.countiter > 0 and self.opts['termination_callback'] and
                self.opts['termination_callback'] != str(self.opts['termination_callback'])):
            self.callbackstop = self.opts['termination_callback'](self)

        return self.stopdict(self if check else None)  # update the stopdict and return a Dict

    # ____________________________________________________________
    # ____________________________________________________________
    def __init__(self, x0, sigma0, inopts={}):
        """see class `CMAEvolutionStrategy`

        """
        self.inputargs = dict(locals())  # for the record
        del self.inputargs['self']  # otherwise the instance self has a cyclic reference
        self.inopts = inopts
        opts = CMAOptions(inopts).complement()  # CMAOptions() == fmin([],[]) == defaultOptions()

        if 'noise_handling' in opts and opts.eval('noise_handling'):
            raise ValueError('noise_handling not available with class CMAEvolutionStrategy, use function fmin')
        if 'restarts' in opts and opts.eval('restarts'):
            raise ValueError('restarts not available with class CMAEvolutionStrategy, use function fmin')

        self.set_x0(x0)  # manage weird shapes, set self.x0
        self.N_pheno = len(self.x0)

        self.sigma0 = sigma0
        if isinstance(sigma0, str):  # TODO: no real need here (do rather in fmin)
            self.sigma0 = eval(sigma0)  # like '1./N' or 'np.random.rand(1)[0]+1e-2'
        if np.size(self.sigma0) != 1 or np.shape(self.sigma0):
            raise _Error('input argument sigma0 must be (or evaluate to) a scalar')
        self.sigma = self.sigma0  # goes to initialize

        # extract/expand options
        N = self.N_pheno
        assert isinstance(opts['fixed_variables'], (basestring, dict)) \
            or opts['fixed_variables'] is None
        # TODO: in case of a string we need to eval the fixed_variables
        if isinstance(opts['fixed_variables'], dict):
            N = self.N_pheno - len(opts['fixed_variables'])
        opts.evalall(locals())  # using only N
        self.opts = opts

        self.randn = opts['randn']
        self.gp = GenoPheno(self.N_pheno, opts['scaling_of_variables'], opts['typical_x'],
                            opts['fixed_variables'], opts['transformation'])
        self.boundary_handler = opts.eval('boundary_handling')(opts.eval('bounds'))
        if not self.boundary_handler.has_bounds():
            self.boundary_handler = BoundNone()  # just a little faster and well defined
        elif not self.boundary_handler.is_in_bounds(self.gp.pheno(self.x0)):
            if opts['verbose'] >= 0:
                print('WARNING: initial solution is out of the domain boundaries:')
                print('  x0   = ' + str(self.gp.pheno(self.x0)))
                print('  ldom = ' + str(self.boundary_handler.bounds[0]))
                print('  udom = ' + str(self.boundary_handler.bounds[1]))

        # self.mean = array(self.x0, copy=True)
        tmp, self.gp.tf_geno = self.gp.tf_geno, lambda x: x  # a hack to avoid an exception if tf_geno is None
        self.mean = self.gp.geno(self.x0, copy_always=True)
        self.gp.tf_geno = tmp
        # without copy_always interface:
        # self.mean = self.gp.geno(array(self.x0, copy=True), copy_if_changed=False)
        self.N = len(self.mean)
        assert N == self.N
        self.fmean = np.NaN  # TODO: name should change? prints nan in output files (OK with matlab&octave)
        self.fmean_noise_free = 0.  # for output only

        self.adapt_sigma = opts['AdaptSigma']
        if self.adapt_sigma is False:
            self.adapt_sigma = CMAAdaptSigmaNone
        self.adapt_sigma = self.adapt_sigma()  # class instance

        self.sp = _CMAParameters(N, opts)
        self.sp0 = self.sp  # looks useless, as it is not a copy

        # initialization of state variables
        self.countiter = 0
        self.countevals = max((0, opts['verb_append'])) \
            if not isinstance(opts['verb_append'], bool) else 0
        self.pc = np.zeros(N)

        self.sigma_vec = np.ones(N) if np.isfinite(self.sp.dampsvec) else 1
        stds = np.ones(N)
        if self.opts['CMA_teststds'] is not None and np.all(self.opts['CMA_teststds']):  # also 0 would not make sense
            stds = array(self.opts['CMA_teststds'])
            if np.size(stds) != N:
                raise _Error('CMA_teststds option must have dimension = ' + str(N))
        if self.opts['CMA_diagonal']:  # is True or > 0
            # linear time and space complexity
            self.B = array(1)  # works fine with np.dot(self.B, anything) and self.B.T
            self.C = stds**2  # TODO: remove this!?
            self.dC = self.C
        else:
            self.B = np.eye(N)  # identity(N), do not from matlib import *, as eye is a matrix there
            # prevent equal eigenvalues, a hack for np.linalg:
            self.C = np.diag(stds**2 * exp(1e-6 * (np.random.rand(N) - 0.5)))
            self.dC = np.diag(self.C).copy()
            self.Yneg = np.zeros((N, N))
        self.D = stds

        # self.gp.pheno adds fixed variables
        relative_stds = ((self.gp.pheno(self.mean + self.sigma * self.sigma_vec * self.D)
                          - self.gp.pheno(self.mean - self.sigma * self.sigma_vec * self.D)) / 2.0
                         / (self.boundary_handler.get_bounds('upper', self.N_pheno)
                            - self.boundary_handler.get_bounds('lower', self.N_pheno)))
        if np.any(relative_stds > 1):
            raise ValueError('initial standard deviations larger than the bounded domain size in variables '
                             + str(np.where(relative_stds > 1)[0]))
        self.flgtelldone = True
        self.itereigenupdated = self.countiter
        self.noiseS = 0  # noise "signal"
        self.hsiglist = []

        if not opts['seed']:
            np.random.seed()
            six_decimals = (time.time() - 1e6 * (time.time() // 1e6))
            opts['seed'] = 1e5 * np.random.rand() + six_decimals + 1e5 * (time.time() % 1)
        opts['seed'] = int(opts['seed'])
        np.random.seed(opts['seed'])

        self.sent_solutions = CMASolutionDict()
        self.archive = CMASolutionDict()
        self.best = BestSolution()

        out = {}  # TODO: obsolete, replaced by method results()?
        out['best'] = self.best
        # out['hsigcount'] = 0
        out['termination'] = {}
        self.out = out

        self.const = _BlancClass()
        self.const.chiN = N**0.5 * (1 - 1. / (4.*N) + 1. / (21.*N**2))  # expectation of norm(randn(N,1))

        self.logger = CMADataLogger(opts['verb_filenameprefix'], modulo=opts['verb_log']).register(self)

        # attribute for stopping criteria in function stop
        self.stopdict = CMAStopDict()
        self.callbackstop = 0

        self.fit = _BlancClass()
        self.fit.fit = []  # not really necessary
        self.fit.hist = []  # short history of best
        self.fit.histbest = []  # long history of best
        self.fit.histmedian = []  # long history of median

        self.more_to_write = []  # [1, 1, 1, 1]  # N*[1]  # needed when writing takes place before setting

        # say hello
        if opts['verb_disp'] > 0 and opts['verbose'] >= 0:
            sweighted = '_w' if self.sp.mu > 1 else ''
            smirr = 'mirr%d' % (self.sp.lam_mirr) if self.sp.lam_mirr else ''
            print('(%d' % (self.sp.mu) + sweighted + ',%d' % (self.sp.popsize) + smirr +
                  ')-' + ('a' if opts['CMA_active'] else '') + 'CMA-ES' +
                  ' (mu_w=%2.1f,w_1=%d%%)' % (self.sp.mueff, int(100 * self.sp.weights[0])) +
                  ' in dimension %d (seed=%d, %s)' % (N, opts['seed'], time.asctime()))  # + func.__name__
            if opts['CMA_diagonal'] and self.sp.CMA_on:
                s = ''
                if opts['CMA_diagonal'] is not True:
                    s = ' for '
                    if opts['CMA_diagonal'] < np.inf:
                        s += str(int(opts['CMA_diagonal']))
                    else:
                        s += str(np.floor(opts['CMA_diagonal']))
                    s += ' iterations'
                    s += ' (1/ccov=' + str(round(1. / (self.sp.c1 + self.sp.cmu))) + ')'
                print('   Covariance matrix is diagonal' + s)

    def set_x0(self, x0):
        if x0 == str(x0):
            x0 = eval(x0)
        self.x0 = array(x0)  # should not have column or row, is just 1-D
        if self.x0.ndim == 2:
            if self.opts.eval('verbose') >= 0:
                print('WARNING: input x0 should be a list or 1-D array, trying to flatten ' +
                      str(self.x0.shape) + '-array')
            if self.x0.shape[0] == 1:
                self.x0 = self.x0[0]
            elif self.x0.shape[1] == 1:
                self.x0 = array([x[0] for x in self.x0])
        if self.x0.ndim != 1:
            raise _Error('x0 must be 1-D array')
        if len(self.x0) <= 1:
            raise _Error('optimization in 1-D is not supported (code was never tested)')
        self.x0.resize(self.x0.shape[0])  # 1-D array, not really necessary?!

    # ____________________________________________________________
    # ____________________________________________________________
    def ask(self, number=None, xmean=None, sigma_fac=1):
        """get new candidate solutions, sampled from a multivariate
        normal distribution and transformed to f-representation
        (phenotype) to be evaluated.

        Arguments
        ---------
        `number`
            number of returned solutions, by default the
            population size ``popsize`` (AKA ``lambda``).
        `xmean`
            distribution mean
        `sigma_fac`
            multiplier for internal sample width (standard
            deviation)

        Return
        ------
        A list of N-dimensional candidate solutions to be evaluated

        Example
        -------
        >>> import cma
        >>> es = cma.CMAEvolutionStrategy([0,0,0,0], 0.3)
        >>> while not es.stop() and es.best.f > 1e-6:  # my_desired_target_f_value
        ...     X = es.ask()  # get list of new solutions
        ...     fit = [cma.fcts.rosen(x) for x in X]  # call function rosen with each solution
        ...     es.tell(X, fit)  # feed values

        :See: `ask_and_eval`, `ask_geno`, `tell`

        """
        pop_geno = self.ask_geno(number, xmean, sigma_fac)


        # N,lambda=20,200: overall CPU 7s vs 5s == 40% overhead, even without bounds!
        #                  new data: 11.5s vs 9.5s == 20%
        # TODO: check here, whether this is necessary?
        # return [self.gp.pheno(x, copy=False, into_bounds=self.boundary_handler.repair) for x in pop]  # probably fine
        # return [Solution(self.gp.pheno(x, copy=False), copy=False) for x in pop]  # here comes the memory leak, now solved
        # pop_pheno = [Solution(self.gp.pheno(x, copy=False), copy=False).repair(self.gp.bounds) for x in pop_geno]
        pop_pheno = [self.gp.pheno(x, copy=True, into_bounds=self.boundary_handler.repair) for x in pop_geno]

        # insert solutions, this could also (better?) be done in self.gp.pheno
        for i in rglen((pop_geno)):
            self.sent_solutions.insert(pop_pheno[i], geno=pop_geno[i], iteration=self.countiter)
        return pop_pheno

    # ____________________________________________________________
    # ____________________________________________________________
    def ask_geno(self, number=None, xmean=None, sigma_fac=1):
        """get new candidate solutions in genotype representation, sampled
        from a multivariate normal distribution.

        Arguments are
            `number`
                number of returned solutions, by default the
                population size `popsize` (AKA lambda).
            `xmean`
                distribution mean
            `sigma_fac`
                multiplier for internal sample width (standard
                deviation)

        `ask_geno` returns a list of N-dimensional candidate solutions
        in genotype representation and is called by `ask`.

        :See: `ask`, `ask_and_eval`

        """

        if number is None or number < 1:
            number = self.sp.popsize
        if xmean is None:
            xmean = self.mean
        else:
            try:
                xmean = self.archive[xmean]['geno']
                # noise handling after call of tell
            except KeyError:
                try:
                    xmean = self.sent_solutions[xmean]['geno']
                    # noise handling before calling tell
                except KeyError:
                    pass

        if self.countiter == 0:
            self.tic = time.clock()  # backward compatible
            self.elapsed_time = ElapsedTime()

        sigma = sigma_fac * self.sigma

        # update parameters for sampling the distribution
        #        fac  0      1      10
        # 150-D cigar:
        #           50749  50464   50787
        # 200-D elli:               == 6.9
        #                  99900   101160
        #                 100995   103275 == 2% loss
        # 100-D elli:               == 6.9
        #                 363052   369325  < 2% loss
        #                 365075   365755

        # update distribution
        if self.sp.CMA_on and (
                (self.opts['updatecovwait'] is None and
                 self.countiter >=
                 self.itereigenupdated + 1. / (self.sp.c1 + self.sp.cmu) / self.N / 10
                 ) or
                (self.opts['updatecovwait'] is not None and
                 self.countiter > self.itereigenupdated + self.opts['updatecovwait']
                 ) or
                (self.sp.neg.cmuexp * (self.countiter - self.itereigenupdated) > 0.5
                 )  # TODO (minor): not sure whether this is "the right" criterion
                ):
            self.updateBD()
        # sample distribution
        if self.flgtelldone:  # could be done in tell()!?
            self.flgtelldone = False
            self.ary = []

        # each row is a solution
        arz = self.randn((number, self.N))
        # zzzzzzzzzzzzzzzzzzzzzzzzzzz
        if self.opts['CMA_sample_on_sphere_surface']:  # normalize the length to chiN
            for i in rglen((arz)):
                ss = sum(arz[i]**2)
                if 1 < 3 or ss > self.N + 10.1:
                    arz[i] *= (self.N**0.5 if self.opts['CSA_squared'] else self.const.chiN) / ss**0.5
            # or normalize to the average length
            # arz *= 1 * self.const.chiN / np.mean([sum(z**2)**0.5 for z in arz])

            # fac = np.mean(sum(arz**2, 1)**0.5)
            # print fac
            # arz *= self.const.chiN / fac
        ary = self.sigma_vec * np.dot(self.B, (self.D * arz).T).T
        if number > 2 and self.countiter > 2:
            if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or
                    self.opts['mean_shift_line_samples'] or
                    self.opts['pc_line_samples']):
                ys = []
                if self.opts['pc_line_samples']:
                    ys.append(self.pc[:])  # now TPA is with pc_line_samples
                if self.opts['mean_shift_line_samples']:
                    ys.append(self.mean - self.mean_old)
                if not len(ys):
                    ys.append(self.mean - self.mean_old)
                # assign a mirrored pair from each element of ys into ary
                for i, y in enumerate(ys):
                    if len(arz) > 2 * i + 1:  # at least two more samples
                        assert y is not self.pc
                        y *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobisNorm(y)
                        # TODO: rescale y depending on some parameter?
                        ary[2 * i] = y / self.sigma
                        ary[2 * i + 1] = y / -self.sigma
                    else:
                        _print_warning('line samples omitted due to small popsize',
                                       method_name='ask_geno', iteration=self.countiter)

        pop = xmean + sigma * ary
        self.evaluations_per_f_value = 1
        self.ary = ary  # ask_geno is called recursively in CMAAdaptSigmaTPA
        if number == self.sp.popsize:
            self.arz = arz  # is never used
        return pop

    def get_mirror(self, x, preserve_length=False):
        """return ``pheno(self.mean - (geno(x) - self.mean))``.

        >>> import cma
        >>> es = cma.CMAEvolutionStrategy(cma.np.random.randn(3), 1)
        >>> x = cma.np.random.randn(3)
        >>> assert cma.Mh.vequals_approximately(es.mean - (x - es.mean), es.get_mirror(x, preserve_length=True))
        >>> x = es.ask(1)[0]
        >>> vals = (es.get_mirror(x) - es.mean) / (x - es.mean)
        >>> assert cma.Mh.equals_approximately(sum(vals), len(vals) * vals[0])

        TODO: this implementation is as yet experimental.

        Selectively mirrored sampling improves to a moderate extent, but
        over-additively with active CMA, for quite understandable reasons.

        Optimal numbers of mirrors are surprisingly small: 1, 2, 3 for
        maxlam = 7, 13, 20; note, however, that 3, 6, 10 are the respective
        maximal possible numbers of mirrors, which must clearly be suboptimal.

        """
        try:
            dx = self.sent_solutions[x]['geno'] - self.mean
        except:  # can only happen with injected solutions?!
            dx = self.gp.geno(x, from_bounds=self.boundary_handler.inverse, copy_if_changed=True) - self.mean

        if not preserve_length:
            dx *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobisNorm(dx)
        x = self.mean - dx
        y = self.gp.pheno(x, into_bounds=self.boundary_handler.repair)
        # old measure: costs 25% in CPU performance with N,lambda=20,200
        self.sent_solutions.insert(y, geno=x, iteration=self.countiter)
        return y

    def mirror_penalized(self, f_values, idx):
        """obsolete and subject to removal (TODO),
        return modified f-values such that for each mirror one becomes worst.

        This function is useless when selective mirroring is applied with no
        more than (lambda-mu)/2 solutions.

        Mirrors are leading and trailing values in ``f_values``.

        """
        assert len(f_values) >= 2 * len(idx)
        m = np.max(np.abs(f_values))
        for i in xrange(len(idx)):  # bug fix: was ``for i in len(idx)``
            if f_values[idx[i]] > f_values[-1 - i]:
                f_values[idx[i]] += m
            else:
                f_values[-1 - i] += m
        return f_values

    def mirror_idx_cov(self, f_values, idx1):  # will most likely be removed
        """obsolete and subject to removal (TODO),
        return indices for the negative ("active") update of the covariance
        matrix, assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]``
        are the corresponding mirrored values.

        Computes the index of the worse solution, sorted by the f-value of
        the better solution.

        TODO: when the actual mirror was rejected, it is better
        to return idx1 instead of idx2.

        Remark: this function might not be necessary at all: if the worst
        solution is the best mirrored, the covariance matrix updates cancel
        (cave: weights and learning rates), which seems to be what is
        desirable. If the mirror is bad, a strong negative update is made,
        which again is what is desirable. And the fitness--step-length
        correlation is in part addressed by using flat weights.

        """
        idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1)
        f = []
        for i in rglen((idx1)):
            f.append(min((f_values[idx1[i]], f_values[idx2[i]])))
            # idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i])
        return idx2[np.argsort(f)][-1::-1]

    def eval_mean(self, func, args=()):
        """evaluate the distribution mean, this is not (yet) effective
        in terms of termination or display"""
        self.fmean = func(self.mean, *args)

    # ____________________________________________________________
    # ____________________________________________________________
    #
    def ask_and_eval(self, func, args=(), number=None, xmean=None, sigma_fac=1,
                     evaluations=1, aggregation=np.median, kappa=1):
        """samples `number` solutions and evaluates them on `func`, where
        each solution `s` is resampled until ``self.is_feasible(s, func(s)) is True``.

        Arguments
        ---------
        `func`
            objective function
        `args`
            additional parameters for `func`
        `number`
            number of solutions to be sampled, by default
            population size ``popsize`` (AKA lambda)
        `xmean`
            mean for sampling the solutions, by default ``self.mean``.
        `sigma_fac`
            multiplier for sampling width, standard deviation, for example
            to get a small perturbation of solution `xmean`
        `evaluations`
            number of evaluations for each sampled solution
        `aggregation`
            function that aggregates `evaluations` values to
            a single value.
        `kappa`
            multiplier used for the evaluation of the solutions, in
            that ``func(m + kappa*(x - m))`` is the f-value for x.

        Return
        ------
        ``(X, fit)``, where
            X -- list of solutions
            fit -- list of respective function values

        Details
        -------
        While ``not self.is_feasible(x, func(x))``, new solutions are
        sampled. By default
        ``self.is_feasible == cma.feasible == lambda x, f: f not in (None, np.NaN)``.
        The argument to `func` can be freely modified within `func`.

        Depending on the ``CMA_mirrors`` option, some solutions are not
        sampled independently but as mirrors of other bad solutions. This
        is a simple derandomization that can save 10-30% of the evaluations,
        in particular with small populations, for example on the cigar function.
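
        A custom feasibility test can be supplied via the ``is_feasible``
        option; a minimal sketch (assuming the two-argument signature
        shown above)::

            opts = {'is_feasible': lambda x, f: f is not None and not np.isnan(f)}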

        Example
        -------
        >>> import cma
        >>> x0, sigma0 = 8*[10], 1  # 8-D
        >>> es = cma.CMAEvolutionStrategy(x0, sigma0)
        >>> while not es.stop():
        ...     X, fit = es.ask_and_eval(cma.fcts.elli)  # handles NaN with resampling
        ...     es.tell(X, fit)  # pass on fitness values
        ...     es.disp(20)  # print every 20-th iteration
        >>> print('terminated on ' + str(es.stop()))
        <output omitted>

        A single iteration step can be expressed in one line, such that
        an entire optimization after initialization becomes
        ::

            while not es.stop():
                es.tell(*es.ask_and_eval(cma.fcts.elli))

        """
        # initialize
        popsize = self.sp.popsize
        if number is not None:
            popsize = number
        selective_mirroring = True
        nmirrors = self.sp.lam_mirr
        if popsize != self.sp.popsize:
            nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize)
            # TODO: now selective mirroring might be impaired
        assert nmirrors <= popsize // 2
        self.mirrors_idx = np.arange(nmirrors)  # might never be used
        self.mirrors_rejected_idx = []  # might never be used
        if xmean is None:
            xmean = self.mean
        is_feasible = self.opts['is_feasible']

        # do the work
        fit = []  # or np.NaN * np.empty(number)
        X_first = self.ask(popsize)
        X = []
        for k in xrange(int(popsize)):
            x, f = X_first.pop(0), None
            nreject = -1
            while nreject < 0 or not is_feasible(x, f):  # rejection sampling
                nreject += 1
                if nreject:  # resample
                    x = self.ask(1, xmean, sigma_fac)[0]
                elif k >= popsize - nmirrors:  # mirrored sample
                    if k == popsize - nmirrors and selective_mirroring:
                        self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1]
                    x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
                if nreject == 1 and k >= popsize - nmirrors:
                    self.mirrors_rejected_idx.append(k)

                # constraints handling test hardwired ccccccccccc
                length_normalizer = 1
                # zzzzzzzzzzzzzzzzzzzzzzzzz
                f = func(x, *args) if kappa == 1 else func(xmean + kappa * length_normalizer * (x - xmean), *args)
                if is_feasible(x, f) and evaluations > 1:
                    f = aggregation([f] + [(func(x, *args) if kappa == 1 else func(xmean + kappa * length_normalizer * (x - xmean), *args)) for _i in xrange(int(evaluations - 1))])
                if (nreject + 1) % 1000 == 0:  # bug fix: parentheses were missing around nreject + 1
                    print('  %d solutions rejected (f-value NaN or None) at iteration %d' %
                          (nreject, self.countiter))
            fit.append(f)
            X.append(x)
        self.evaluations_per_f_value = int(evaluations)
        return X, fit


    # ____________________________________________________________
    def tell(self, solutions, function_values, check_points=None, copy=False):
        """pass objective function values to prepare for the next
        iteration. This core procedure of the CMA-ES algorithm updates
        all state variables, in particular the two evolution paths, the
        distribution mean, the covariance matrix and the step-size.

        Arguments
        ---------
        `solutions`
            list or array of candidate solution points (of
            type `numpy.ndarray`), most presumably delivered before by
            method `ask()` or `ask_and_eval()`.
        `function_values`
            list or array of objective function values
            corresponding to the respective points. Besides termination
            decisions, only the ranking of values in `function_values`
            is used.
        `check_points`
            If ``check_points is None``, only solutions that are not generated
            by `ask()` are possibly clipped (recommended). ``False`` does not clip
            any solution (not recommended).
            If ``True``, clips solutions that realize long steps (i.e. also
            those that are unlikely to be generated with `ask()`). `check_points`
            can be a list of indices to be checked in solutions.
        `copy`
            ``solutions`` can be modified in this routine, if ``copy is False``
        `AdaptSigma`
            sigma adaptation class like ``CMAAdaptSigmaCSA``, with an ad-hoc
            interface very specific to the ``CMAEvolutionStrategy.tell`` method
            (this interface might change in future). Overwrites `self.AdaptSigma`.

        Details
        -------
        `tell()` updates the parameters of the multivariate
        normal search distribution, namely covariance matrix and
        step-size, and also updates the attributes `countiter` and
        `countevals`. Checking the points for consistency is quadratic
        in the dimension (like sampling points).

        Bugs
        ----
        The effect of changing the solutions delivered by `ask()` depends on
        whether boundary handling is applied. With boundary handling,
        modifications are disregarded. This is necessary to apply the default
        boundary handling that uses unrepaired solutions, but it might change
        in future.

        Example
        -------
        ::

            import cma
            func = cma.fcts.elli  # choose objective function
            es = cma.CMAEvolutionStrategy(cma.np.random.rand(10), 1)
            while not es.stop():
                X = es.ask()
                es.tell(X, [func(x) for x in X])
            es.result()  # where the result can be found

        :See: class `CMAEvolutionStrategy`, `ask()`, `ask_and_eval()`, `fmin()`

        """
        if self.flgtelldone:
            raise _Error('tell should only be called once per iteration')

        lam = len(solutions)
        if lam != array(function_values).shape[0]:
            raise _Error('for each candidate solution '
                         + 'a function value must be provided')
        if lam + self.sp.lam_mirr < 3:
            raise _Error('population size ' + str(lam) + ' is too small when option CMA_mirrors * popsize < 0.5')

        if not np.isscalar(function_values[0]):
            if np.isscalar(function_values[0][0]):
                if self.countiter <= 1:
                    print('WARNING: function values are not a list of scalars (further warnings are suppressed)')
                function_values = [val[0] for val in function_values]
            else:
                raise _Error('objective function values must be a list of scalars')


        # ## prepare
        N = self.N
        sp = self.sp
        if lam < sp.mu:  # rather decrease cmean instead of having mu > lambda//2
            raise _Error('not enough solutions passed to function tell (mu>lambda)')

        self.countiter += 1  # >= 1 now
        self.countevals += sp.popsize * self.evaluations_per_f_value
        self.best.update(solutions, self.sent_solutions, function_values, self.countevals)

        flgseparable = self.opts['CMA_diagonal'] is True \
            or self.countiter <= self.opts['CMA_diagonal']
        if not flgseparable and len(self.C.shape) == 1:  # C was diagonal, i.e. 1-D
            # enter non-separable phase (no easy return from here)
            self.B = np.eye(N)  # identity(N)
            self.C = np.diag(self.C)
            idx = np.argsort(self.D)
            self.D = self.D[idx]
            self.B = self.B[:, idx]
            self.Yneg = np.zeros((N, N))

        # ## manage fitness
        fit = self.fit  # make short cut

        # CPU for N,lam=20,200: this takes 10s vs 7s
        fit.bndpen = self.boundary_handler.update(function_values, self)(solutions, self.sent_solutions, self.gp)
        # for testing:
        # fit.bndpen = self.boundary_handler.update(function_values, self)([s.unrepaired for s in solutions])
        fit.idx = np.argsort(array(fit.bndpen) + array(function_values))
        fit.fit = array(function_values, copy=False)[fit.idx]

        # update output data TODO: this is obsolete!? However: need to communicate the current best x-value?
        # old: out['recent_x'] = self.gp.pheno(pop[0])
        self.out['recent_x'] = array(solutions[fit.idx[0]])  # TODO: change in a data structure(?) and use current as identifier
        self.out['recent_f'] = fit.fit[0]

        # fitness histories
        fit.hist.insert(0, fit.fit[0])
        # if len(self.fit.histbest) < 120+30*N/sp.popsize or  # does not help, as tablet in the beginning is the critical counter-case
        if ((self.countiter % 5) == 0):  # 20 percent of 1e5 gen.
            fit.histbest.insert(0, fit.fit[0])
            fit.histmedian.insert(0, np.median(fit.fit) if len(fit.fit) < 21
                                  else fit.fit[self.popsize // 2])
        if len(fit.histbest) > 2e4:  # 10 + 30*N/sp.popsize:
            fit.histbest.pop()
            fit.histmedian.pop()
        if len(fit.hist) > 10 + 30 * N / sp.popsize:
            fit.hist.pop()

        # TODO: clean up inconsistency when an unrepaired solution is available and used
        # now get the genotypes
        pop = []  # create pop from input argument solutions
        for k, s in enumerate(solutions):  # use phenotype before Solution.repair()
            if 1 < 3:
                pop += [self.gp.geno(s,
                                     from_bounds=self.boundary_handler.inverse,
                                     repair=(self.repair_genotype if check_points not in (False, 0, [], ()) else None),
                                     archive=self.sent_solutions)]  # takes genotype from sent_solutions, if available
                try:
                    self.archive.insert(s, value=self.sent_solutions.pop(s), fitness=function_values[k])
                    # self.sent_solutions.pop(s)
                except KeyError:
                    pass
        try:
            moldold = self.mean_old
        except:
            pass
        self.mean_old = self.mean
        mold = self.mean_old  # just an alias

        # check and normalize each x - m
        # check_points is a flag (None is default: check non-known solutions) or an index list
        # should a number also be possible (first check_points points)?
        if check_points not in (None, False, 0, [], ()):  # useful in case of injected solutions and/or adaptive encoding, however is automatic with use_sent_solutions
            try:
                if len(check_points):
                    idx = check_points
            except:
                idx = xrange(sp.popsize)

            for k in idx:
                self.repair_genotype(pop[k])

        # only arrays can be multiply indexed
        pop = array(pop, copy=False)

        # sort pop
        pop = pop[fit.idx]

        if self.opts['CMA_elitist'] and self.best.f < fit.fit[0]:
            if self.best.x_geno is not None:
                xp = [self.best.x_geno]
                # xp = [self.best.xdict['geno']]
                # xp = [self.gp.geno(self.best.x[:])]  # TODO: remove
                # print self.mahalanobisNorm(xp[0]-self.mean)
            else:
                xp = [self.gp.geno(array(self.best.x, copy=True), self.boundary_handler.inverse, copy_if_changed=False)]
                print('genotype for elitist not found')
            self.clip_or_fit_solutions(xp, [0])
            pop = array([xp[0]] + list(pop))

        # compute new mean
        self.mean = mold + self.sp.cmean * \
            (sum(sp.weights * pop[0:sp.mu].T, 1) - mold)


        # check Delta m (this is not default, but could become at some point)
        # CAVE: upper_length=sqrt(2)+2 is too restrictive, test upper_length = sqrt(2*N) thoroughly.
        # replaced by repair_geno?
        # simple test case injecting self.mean:
        # self.mean = 1e-4 * self.sigma * np.random.randn(N)
        if 1 < 3:
            cmean = self.sp.cmean

        # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
        # get learning rate constants
        cc, c1, cmu = sp.cc, sp.c1, sp.cmu
        if flgseparable:
            cc, c1, cmu = sp.cc_sep, sp.c1_sep, sp.cmu_sep

        # now the real work can start

        hsig = self.adapt_sigma.hsig(self)  # ps update must be done here in the separable case

        # hsig = sum(self.ps**2) / self.N < 2 + 4./(N+1)
        # adjust missing variance due to hsig, in 4-D with damps=1e99 and sig0 small
        # hsig leads to premature convergence of C otherwise
        # hsiga = (1-hsig**2) * c1 * cc * (2-cc)  # to be removed in future
        c1a = c1 - (1 - hsig**2) * c1 * cc * (2 - cc)  # adjust for variance loss

        self.pc = (1 - cc) * self.pc + \
            hsig * (sqrt(cc * (2 - cc) * sp.mueff) / self.sigma / cmean) * \
            (self.mean - mold) / self.sigma_vec

        # covariance matrix adaptation/update
        if sp.CMA_on:
            # assert sp.c1 + sp.cmu < sp.mueff / N  # ??
            assert c1 + cmu <= 1

            # default full matrix case
            if not flgseparable:
                Z = (pop[0:sp.mu] - mold) / (self.sigma * self.sigma_vec)
                Z = dot((cmu * sp.weights) * Z.T, Z)  # learning rate integrated
                if self.sp.neg.cmuexp:
                    tmp = (pop[-sp.neg.mu:] - mold) / (self.sigma * self.sigma_vec)
                    if 1 < 3:  # normalize to constant length (seems preferable in several aspects)
                        # print(tmp.shape)
                        for i in range(tmp.shape[0]):
                            tmp[i, :] *= N**0.5 / self.mahalanobisNorm(tmp[i, :]) / (self.sigma * self.sigma_vec)
                        # print(tmp.shape)
                    self.Yneg *= 1 - self.sp.neg.cmuexp  # for some reason necessary?
                    self.Yneg += dot(sp.neg.weights * tmp.T, tmp) - self.C
                    # self.update_exponential(dot(sp.neg.weights * tmp.T, tmp) - 1 * self.C, -1 * self.sp.neg.cmuexp)

                self.C *= 1 - c1a - cmu
                self.C += np.outer(c1 * self.pc, self.pc) + Z
                self.dC = np.diag(self.C).copy()  # for output and termination checking

            else:  # separable/diagonal linear case
                assert(c1 + cmu <= 1)
                Z = np.zeros(N)
                for k in xrange(sp.mu):
                    z = (pop[k] - mold) / (self.sigma * self.sigma_vec)  # TODO see above
                    Z += sp.weights[k] * z * z  # is 1-D
                self.C = (1 - c1a - cmu) * self.C + c1 * self.pc * self.pc + cmu * Z
                # TODO: self.C *= exp(cmuneg * (N - dot(sp.neg.weights, **2)
                self.dC = self.C
                self.D = sqrt(self.C)  # C is a 1-D array, this is why adapt_sigma needs to prepare before
                self.itereigenupdated = self.countiter

        # idx = self.mirror_idx_cov()  # take half of mirrored vectors for negative update

        # step-size adaptation, adapt sigma
        # in case of TPA, function_values[0] and [1] must reflect samples collinear to xmean - xmean_old
        self.adapt_sigma.update(self, function_values=function_values)

        if self.sigma * min(self.dC)**0.5 < self.opts['minstd']:
            self.sigma = self.opts['minstd'] / min(self.dC)**0.5
        # g = self.countiter
        # N = self.N
        mindx = eval(self.opts['mindx']) if isinstance(self.opts['mindx'], basestring) else self.opts['mindx']
        if self.sigma * min(self.D) < mindx:  # TODO: sigma_vec is missing here
            self.sigma = mindx / min(self.D)

        if self.sigma > 1e9 * self.sigma0:
            alpha = self.sigma / max(self.D)
            self.multiplyC(alpha)
            self.sigma /= alpha**0.5
            self.opts['tolupsigma'] /= alpha**0.5  # to be compared with sigma

        # TODO: increase sigma in case of a plateau?

        # Uncertainty noise measurement is done on an upper level

        self.flgtelldone = True
    # end tell()

    def result(self):
        """return ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds)``"""
        # TODO: how about xcurrent?
        return self.best.get() + (
            self.countevals, self.countiter, self.gp.pheno(self.mean), self.gp.scales * self.sigma * self.sigma_vec * self.dC**0.5)


    def result_pretty(self, number_of_runs=0, time_str=None):
        """pretty print the result."""
        s = (' after %i restart' + ('s' if number_of_runs > 1 else '')) \
            % number_of_runs if number_of_runs else ''
        for k, v in self.stop().items():
            print('termination on %s=%s%s' % (k, str(v), s) +
                  (' (%s)' % time_str if time_str else ''))

        print('final/bestever f-value = %e %e' % (self.best.last.f, self.best.f))
        if self.N < 9:
            print('mean solution: ' + str(self.gp.pheno(self.mean)))
            print('std deviation: ' + str(self.sigma * sqrt(self.dC) * self.gp.scales))
        else:
            print('mean solution: %s ...]' % (str(self.gp.pheno(self.mean)[:8])[:-1]))
            print('std deviations: %s ...]' % (str((self.sigma * sqrt(self.dC) * self.gp.scales)[:8])[:-1]))


    def clip_or_fit_solutions(self, pop, idx):
        """make sure that solutions fit the sample distribution; this
        interface will probably change.

        In particular, the frequency of long vectors appearing in
        ``pop[idx] - self.mean`` is limited.

        """
        for k in idx:
            self.repair_genotype(pop[k])

    def repair_genotype(self, x, copy_if_changed=False):
        """make sure that solutions fit the sample distribution; this
        interface will probably change.

        In particular, the frequency of ``x - self.mean`` being long is
        limited.
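
        Illustrative semantics (a sketch of the clipping branch below): if
        ``fac = self.mahalanobisNorm(x - self.mean) / upper_length`` exceeds
        1, the step is shrunk accordingly::

            x = self.mean + (x - self.mean) / fac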

        """
        mold = self.mean
        if 1 < 3:  # hard clip at upper_length
            upper_length = self.N**0.5 + 2 * self.N / (self.N + 2)  # should become an Option, but how? e.g. [0, 2, 2]
            fac = self.mahalanobisNorm(x - mold) / upper_length

            if fac > 1:
                if copy_if_changed:
                    x = (x - mold) / fac + mold
                else:  # should be 25% faster:
                    x -= mold
                    x /= fac
                    x += mold
                # print self.countiter, k, fac, self.mahalanobisNorm(pop[k] - mold)
                # adapt also sigma: which are the trustworthy/injected solutions?
        else:
            if 'checktail' not in self.__dict__:  # hasattr(self, 'checktail')
                raise NotImplementedError
                # from check_tail_smooth import CheckTail  # for the time being
                # self.checktail = CheckTail()
                # print('untested feature checktail is on')
            fac = self.checktail.addchin(self.mahalanobisNorm(x - mold))

            if fac < 1:
                x = fac * (x - mold) + mold

        return x

    # ____________________________________________________________
    # ____________________________________________________________
    #
    def updateBD(self):
        """update internal variables for sampling the distribution with the
        current covariance matrix C. This method is O(N^3), if C is not diagonal.

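        After the update, the columns of ``self.B`` hold the eigenvectors of
        C and ``self.D`` the square roots of the corresponding eigenvalues,
        so that (a sketch of the resulting invariant)::

            C == np.dot(self.B * self.D**2, self.B.T)  # up to numerical error
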
        """
        # itereigenupdated is always up-to-date in the diagonal case
        # just double check here
        if self.itereigenupdated == self.countiter:
            return

        # C has already received the positive updates, here come the additional negative updates
        if self.sp.neg.cmuexp:  # cave:
            if (self.countiter - self.itereigenupdated) * self.sp.neg.cmuexp * self.N < 0.5:  # pos. def. guaranteed
                self.C -= self.sp.neg.cmuexp * self.Yneg
            else:  # guarantees pos. def. unconditionally
                # print('exponential update for negative weights (internally more expensive) in iteration', self.countiter)
                self.update_exponential(self.Yneg, -self.sp.neg.cmuexp)
                # self.C = self.Ypos + Cs * Mh.expms(-self.sp.neg.cmuexp*Csi*self.Yneg*Csi) * Cs
            self.Yneg = np.zeros((self.N, self.N))

        if self.sigma_vec is not 1 and not np.all(self.sigma_vec == 1):
            self.C = dot(dot(np.diag(self.sigma_vec), self.C), np.diag(self.sigma_vec))
            self.sigma_vec[:] = 1

        if self.opts['CMA_const_trace'] in (True, 1, 2):  # normalize trace of C
            if self.opts['CMA_const_trace'] == 2:
                s = np.exp(np.mean(np.log(self.dC)))
            else:
                s = np.mean(self.dC)
            self.C /= s
            self.dC /= s
        self.C = (self.C + self.C.T) / 2
        # self.C = np.triu(self.C) + np.triu(self.C,1).T  # should work as well
        # self.D, self.B = eigh(self.C)  # hermitian, i.e. symmetric C is assumed

        if isinstance(self.opts['CMA_eigenmethod'], type(1)):
            print('WARNING: option CMA_eigenmethod should be a function, not an integer')
            if self.opts['CMA_eigenmethod'] == -1:
                # pygsl
                # easy to install (well, in Windows install gsl binaries first,
                # set system path to respective libgsl-0.dll (or cp the dll to
                # python\DLLS ?), in unzipped pygsl edit
                # gsl_dist/gsl_site_example.py into gsl_dist/gsl_site.py
                # and run "python setup.py build" and "python setup.py install"
                # in MINGW32)
                if 1 < 3:  # import pygsl on the fly
                    try:
                        import pygsl.eigen.eigenvectors  # TODO efficient enough?
                    except ImportError:
                        print('WARNING: could not find pygsl.eigen module, either install pygsl \n' +
                              '  or set option CMA_eigenmethod=1 (is much slower), option set to 1')
                        self.opts['CMA_eigenmethod'] = 0  # use 0 if 1 is too slow

                    self.D, self.B = pygsl.eigen.eigenvectors(self.C)

            elif self.opts['CMA_eigenmethod'] == 0:
                # TODO: thoroughly test np.linalg.eigh
                #       numpy.linalg.eig crashes in 200-D
                #       and EVecs with same EVals are not orthogonal
                self.D, self.B = np.linalg.eigh(self.C)  # self.B[i] is a row and not an eigenvector
            else:  # is overall two (in 10-D) to ten (in 20-D) times slower
                self.D, self.B = Misc.eig(self.C)  # def eig, see below
        else:
            self.D, self.B = self.opts['CMA_eigenmethod'](self.C)


        # assert(sum(self.D-DD) < 1e-6)
        # assert(sum(sum(np.dot(BB, BB.T)-np.eye(self.N))) < 1e-6)
        # assert(sum(sum(np.dot(BB * DD, BB.T) - self.C)) < 1e-6)
        idx = np.argsort(self.D)
        self.D = self.D[idx]
        self.B = self.B[:, idx]  # self.B[i] is a row; the columns self.B[:,i] are eigenvectors
        # assert(all(self.B[self.countiter % self.N] == self.B[self.countiter % self.N,:]))

        # qqqqqqqqqq
        # is O(N^3)
        # assert(sum(abs(self.C - np.dot(self.D * self.B, self.B.T))) < N**2*1e-11)

        self.D **= 0.5
        self.itereigenupdated = self.countiter

    def multiplyC(self, alpha):
        """multiply C with a scalar and update all related internal variables (dC, D, ...)"""
        self.C *= alpha
        if self.dC is not self.C:
            self.dC *= alpha
        self.D *= alpha**0.5
    def update_exponential(self, Z, eta, BDpair=None):
        """exponential update of C that guarantees positive definiteness, that is,
        instead of the assignment ``C = C + eta * Z``,
        we have ``C = C**.5 * exp(eta * C**-.5 * Z * C**-.5) * C**.5``.

        Parameter `Z` should have expectation zero, e.g.
        ``sum(w[i] * z[i] * z[i].T) - C`` if ``E z z.T = C``.

        Parameter `eta` is the learning rate; for ``eta == 0`` nothing is updated.

        This function conducts two eigendecompositions, assuming that
        B and D are not up-to-date, unless `BDpair` is given. Given BDpair,
        B is the eigensystem and D is the vector of sqrt(eigenvalues); one
        eigendecomposition is then omitted.

        Reference: Glasmachers et al. 2010, Exponential Natural Evolution Strategies

        """
        if eta == 0:
            return
        if BDpair:
            B, D = BDpair
        else:
            D, B = self.opts['CMA_eigenmethod'](self.C)
            D **= 0.5
        Cs = dot(B, (B * D).T)  # square root of C
        Csi = dot(B, (B / D).T)  # square root of the inverse of C
        self.C = dot(Cs, dot(Mh.expms(eta * dot(Csi, dot(Z, Csi)), self.opts['CMA_eigenmethod']), Cs))
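
    # Numerical sanity sketch for update_exponential (illustrative only, not
    # part of the API): for eta -> 0, expm(eta * M) ~ I + eta * M, so the
    # exponential update approaches the additive update C + eta * Z. E.g. an
    # expectation-zero Z == 0 leaves C unchanged:
    #
    #   es = CMAEvolutionStrategy(3 * [1], 1, {'verb_disp': 0})
    #   es.update_exponential(np.zeros((3, 3)), 0.1)  # C stays (numerically) the same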

    # ____________________________________________________________
    # ____________________________________________________________
    def feedForResume(self, X, function_values):
        """Given all "previous" candidate solutions and their respective
        function values, the state of a `CMAEvolutionStrategy` object
        can be reconstructed from this history. This is the purpose of
        function `feedForResume`.

        Arguments
        ---------
        `X`
            (all) solution points in chronological order, phenotypic
            representation. The number of points must be a multiple
            of popsize.
        `function_values`
            respective objective function values

        Details
        -------
        `feedForResume` can be called repeatedly with only parts of
        the history. The part must have the length of a multiple
        of the population size.
        `feedForResume` feeds the history in popsize-chunks into `tell`.
        The state of the random number generator might not be
        reconstructed, but this would be only relevant for the future.

        Example
        -------
        ::

            import cma

            # prepare
            (x0, sigma0) = ...  # initial values from previous trial
            X = ...  # list of generated solutions from a previous trial
            f = ...  # respective list of f-values

            # resume
            es = cma.CMAEvolutionStrategy(x0, sigma0)
            es.feedForResume(X, f)

            # continue with func as objective function
            while not es.stop():
                X = es.ask()
                es.tell(X, [func(x) for x in X])

        Credits to Dirk Bueche and Fabrice Marchal for the feeding idea.

        :See: class `CMAEvolutionStrategy` for a simple dump/load to resume

        """
        if self.countiter > 0:
            print('WARNING: feed should generally be used with a new object instance')
        if len(X) != len(function_values):
            raise _Error('number of solutions ' + str(len(X)) +
                         ' and number of function values ' +
                         str(len(function_values)) + ' must not differ')
        popsize = self.sp.popsize
        if (len(X) % popsize) != 0:
            raise _Error('number of solutions ' + str(len(X)) +
                         ' must be a multiple of popsize (lambda) ' +
                         str(popsize))
        for i in xrange(len(X) // popsize):  # bug fix: was ``rglen((X) / popsize)``
            # feed in chunks of size popsize
            self.ask()  # a fake ask, mainly for a conditioned calling of updateBD
                        # and secondarily to get possibly the same random state
            self.tell(X[i * popsize:(i + 1) * popsize], function_values[i * popsize:(i + 1) * popsize])

    # ____________________________________________________________
    # ____________________________________________________________
    def readProperties(self):
        """reads dynamic parameters from property file (not implemented)
        """
        print('not yet implemented')

    # ____________________________________________________________
    # ____________________________________________________________
    def mahalanobisNorm(self, dx):
        """compute the Mahalanobis norm that is induced by the adapted sample
        distribution, covariance matrix C times sigma**2. The expected
        Mahalanobis distance to the sample mean is about sqrt(dimension).

        Argument
        --------
        A *genotype* difference `dx`.

        Example
        -------
        >>> import cma, numpy
        >>> es = cma.CMAEvolutionStrategy(numpy.ones(10), 1)
        >>> xx = numpy.random.randn(2, 10)
        >>> d = es.mahalanobisNorm(es.gp.geno(xx[0]-xx[1]))

        `d` is the distance "in" the true sample distribution,
        sampled points have a typical distance of ``sqrt(2*es.N)``,
        where `N` is the dimension, and an expected distance of
        close to ``sqrt(N)`` to the sample mean. In the example,
        `d` is the Euclidean distance, because C = I and sigma = 1.

        """
        return sqrt(sum((self.D**-1 * np.dot(self.B.T, dx / self.sigma_vec))**2)) / self.sigma

    # ____________________________________________________________
    # ____________________________________________________________
    #
    def timesCroot(self, mat):
        """return C**0.5 times mat, where mat can be a vector or matrix.
        Not functional, because _Croot=C**0.5 is never computed (should be in updateBD)
        """
        print("WARNING: timesCroot is not yet tested")
        if self.opts['CMA_diagonal'] is True \
                or self.countiter <= self.opts['CMA_diagonal']:
            res = (self._Croot * mat.T).T
        else:
            res = np.dot(self._Croot, mat)
        return res

    def divCroot(self, mat):
        """return C**-1/2 times mat, where mat can be a vector or matrix.
        Not functional, because _Crootinv is never computed. """
        print("WARNING: divCroot is not yet tested")
        if self.opts['CMA_diagonal'] is True \
                or self.countiter <= self.opts['CMA_diagonal']:
            res = (self._Crootinv * mat.T).T
        else:
            res = np.dot(self._Crootinv, mat)
        return res

    # ____________________________________________________________
    # ____________________________________________________________
    def disp_annotation(self):
        """print annotation for `disp()`"""
        print('Iterat #Fevals   function value    axis ratio  sigma   minstd maxstd min:sec')
        sys.stdout.flush()

    # ____________________________________________________________
    # ____________________________________________________________
    def disp(self, modulo=None):  # TODO: rather assign opt['verb_disp'] as default?
        """prints some information, according to `disp_annotation()`, if
        ``iteration_counter % modulo == 0``

        """
        if modulo is None:
            modulo = self.opts['verb_disp']

        # console display
        if modulo:
            if (self.countiter - 1) % (10 * modulo) < 1:
                self.disp_annotation()
            if self.countiter > 0 and (self.stop() or self.countiter < 4
                                       or self.countiter % modulo < 1):
                if self.opts['verb_time']:
                    toc = self.elapsed_time()
                    stime = str(int(toc // 60)) + ':' + str(round(toc % 60, 1))
                else:
                    stime = ''
                print(' '.join((repr(self.countiter).rjust(5),
                                repr(self.countevals).rjust(7),
                                '%.15e' % (min(self.fit.fit)),
                                '%4.1e' % (self.D.max() / self.D.min()),
                                '%6.2e' % self.sigma,
                                '%6.0e' % (self.sigma * sqrt(min(self.dC))),
                                '%6.0e' % (self.sigma * sqrt(max(self.dC))),
                                stime)))
                # if self.countiter < 4:
                sys.stdout.flush()
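
    # A behavior sketch (derived from the conditions above, not from the
    # original docs): with the default ``opts['verb_disp'] == 100``, `disp()`
    # prints on iterations 1, 2, 3 and then every 100th iteration, plus once
    # more when a termination criterion is met, e.g.:
    #
    #   es = CMAEvolutionStrategy(4 * [1], 1, {'verb_disp': 10})
    #   while not es.stop():
    #       X = es.ask(); es.tell(X, [sum(x**2) for x in X])
    #       es.disp()  # prints header, then one line every 10th iteration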


cma_default_options = {
    # the following string arguments are evaluated, except verb_filenameprefix
    'AdaptSigma': 'CMAAdaptSigmaCSA # or any other CMAAdaptSigmaBase class e.g. CMAAdaptSigmaTPA',
    'CMA_active': 'True # negative update, conducted after the original update',
    'CMA_activefac': '1 # learning rate multiplier for active update',
    'CMA_cmean': '1 # learning rate for the mean value',
    'CMA_const_trace': 'False # normalize trace, value CMA_const_trace=2 normalizes sum log eigenvalues to zero',
    'CMA_diagonal': '0*100*N/sqrt(popsize) # nb of iterations with diagonal covariance matrix, True for always',  # TODO 4/ccov_separable?
    'CMA_eigenmethod': 'np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, otherwise cma.Misc.eig (slower)',
    'CMA_elitist': 'False # elitism likely impairs global search performance',
    'CMA_mirrors': 'popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used',
    'CMA_mu': 'None # parents selection parameter, default is popsize // 2',
    'CMA_on': 'True # False or 0 for no adaptation of the covariance matrix',
    'CMA_sample_on_sphere_surface': 'False #v all mutation vectors have the same length',
    'CMA_rankmu': 'True # False or 0 for omitting rank-mu update of covariance matrix',
    'CMA_rankmualpha': '0.3 # factor of rank-mu update if mu=1, subject to removal, default might change to 0.0',
    'CMA_dampsvec_fac': 'np.Inf # tentative and subject to changes, 0.5 would be a "default" damping for sigma vector update',
    'CMA_dampsvec_fade': '0.1 # tentative fading out parameter for sigma vector update',
    'CMA_teststds': 'None # factors for non-isotropic initial distr. mainly for test purpose, see scaling_...',
    # 'CMA_AII': 'False # not yet tested',
    'CSA_dampfac': '1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere',
    'CSA_damp_mueff_exponent': '0.5 # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option',
    'CSA_disregard_length': 'False #v True is untested',
    'CSA_clip_length_value': 'None #v untested, [0, 0] means disregarding length completely',
    'CSA_squared': 'False #v use squared length for sigma-adaptation',
    'boundary_handling': 'BoundTransform # or BoundPenalty, unused when ``bounds in (None, [None, None])``',
    'bounds': '[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector',
    # 'eval_parallel2': 'not in use {"processes": None, "timeout": 12, "is_feasible": lambda x: True} # distributes function calls to processes',
    'fixed_variables': 'None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized',
    'ftarget': '-inf #v target function value, minimization',
    'is_feasible': 'is_feasible #v a function that computes feasibility, by default lambda x, f: f not in (None, np.NaN)',
    'maxfevals': 'inf #v maximum number of function evaluations',
    'maxiter': '100 + 50 * (N+3)**2 // popsize**0.5 #v maximum number of iterations',
    'mean_shift_line_samples': 'False #v sample two new solutions collinear to previous mean shift',
    'mindx': '0 #v minimal std in any direction, cave interference with tol*',
    'minstd': '0 #v minimal std in any coordinate direction, cave interference with tol*',
    'pc_line_samples': 'False #v two line samples along the evolution path pc',
    'popsize': '4+int(3*log(N)) # population size, AKA lambda, number of new solutions per iteration',
    'randn': 'np.random.standard_normal #v randn((lam, N)) must return an np.array of shape (lam, N)',
    'scaling_of_variables': 'None # scale for each variable, sigma0 is interpreted w.r.t. this scale, in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by scaling_of_variables and sigma is unchanged, default is ones(N)',
    'seed': 'None # random number seed',
    'termination_callback': 'None #v a function returning True for termination, called after each iteration step and could be abused for side effects',
    'tolfacupx': '1e3 #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0',
    'tolupsigma': '1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eigenvals(C))) indicates "creeping behavior" with usually minor improvements',
    'tolfun': '1e-11 #v termination criterion: tolerance in function value, quite useful',
    'tolfunhist': '1e-12 #v termination criterion: tolerance in function value history',
    'tolstagnation': 'int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations',
    'tolx': '1e-11 #v termination criterion: tolerance in x-changes',
    'transformation': 'None # [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno), t1 is the (optional) back transformation, see class GenoPheno',
    'typical_x': 'None # used with scaling_of_variables',
    'updatecovwait': 'None #v number of iterations without distribution update, name is subject to future changes',  # TODO: rename: iterwaitupdatedistribution?
    'verbose': '1 #v verbosity e.g. of initial/final message, -1 is very quiet, not yet fully implemented',
    'verb_append': '0 # initial evaluation counter, if append, do not overwrite output files',
    'verb_disp': '100 #v verbosity: display console output every verb_disp iteration',
    'verb_filenameprefix': 'outcmaes # output filenames prefix',
    'verb_log': '1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions',
    'verb_plot': '0 #v in fmin(): plot() is called every verb_plot iteration',
    'verb_time': 'True #v output timings on console',
    'vv': '0 #? versatile variable for hacking purposes, value found in self.opts["vv"]'
}
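
# A usage sketch (an illustration only): these defaults are consumed by
# `CMAOptions` below, where unknown keys are warned about and removed:
#
#   opts = CMAOptions({'popsize': 20, 'tolfun': 1e-9})
#   opts = opts.complement()  # fill in the remaining defaults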

class CMAOptions(dict):
    """``CMAOptions()`` returns a dictionary with the available options
    and their default values for class ``CMAEvolutionStrategy``.

    ``CMAOptions('pop')`` returns a subset of recognized options that
    contain 'pop' in their keyword name or (default) value or description.

    ``CMAOptions(opts)`` returns the subset of recognized options in
    ``dict(opts)``.

    Option values can be "written" in a string and, when passed to `fmin`
    or `CMAEvolutionStrategy`, are evaluated using "N" and "popsize" as
    known values for dimension and population size (sample size, number
    of new solutions per iteration). All default option values are given
    as such a string.

    Details
    -------
    ``CMAOptions`` entries starting with ``tol`` are termination
    "tolerances".

    For `tolstagnation`, the median over the first and the second half
    of at least `tolstagnation` iterations are compared for both, the
    per-iteration best and per-iteration median function value.

    Example
    -------
    ::

        import cma
        cma.CMAOptions('tol')

    is a shortcut for ``cma.CMAOptions().match('tol')`` that returns all options
    that contain 'tol' in their name or description.

    :See: `fmin`(), `CMAEvolutionStrategy`, `_CMAParameters`

    """
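
    # A small illustration (a sketch, not from the original docs) of the
    # string evaluation mechanism; `log` is the module-level math import:
    #
    #   opts = CMAOptions()
    #   opts.eval('popsize', loc={'N': 10})  # '4+int(3*log(N))...' -> 10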

    # @classmethod  # self is the class, not the instance
    # @property
    # def default(self):
    #     """returns all options with defaults"""
    #     return fmin([], [])

    @staticmethod
    def defaults():
        """return a dictionary with default option values and description"""
        return dict(cma_default_options)

    @staticmethod
    def versatileOptions():
        """return list of options that can be changed at any time (not
        only at initialization); however, the list might not be entirely
        up to date.

        The string ' #v ' in the default value indicates a 'versatile'
        option that can be changed any time.

        """
        return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items()) if i[1].find(' #v ') > 0))
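
    # sketch (follows from the ' #v ' markers in the defaults above):
    #   'ftarget' in CMAOptions.versatileOptions()  # -> True  (' #v ' present)
    #   'popsize' in CMAOptions.versatileOptions()  # -> False (no ' #v ')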

    def __init__(self, s=None, unchecked=False):
        """return a `CMAOptions` instance, either with the default
        options, if ``s is None``, or with all options whose name or
        description contains `s`, if `s` is a string (case is
        disregarded), or with entries from dictionary `s` as options,
        not complemented with default options or settings

        Returns: see above.

        """
        # if not CMAOptions.defaults:  # this is different from self.defaults!!!
        #     CMAOptions.defaults = fmin([], [])
        if s is None:
            super(CMAOptions, self).__init__(CMAOptions.defaults())  # dict.__init__(self, CMAOptions.defaults()) should be the same
            # self = CMAOptions.defaults()
        elif isinstance(s, basestring):
            super(CMAOptions, self).__init__(CMAOptions().match(s))
            # we could return here
        else:
            super(CMAOptions, self).__init__(s)

        if not unchecked:
            for key in list(self.keys()):
                if key not in CMAOptions.defaults():
                    print('Warning in cma.CMAOptions.__init__(): invalid key ``' + str(key) + '`` removed')
                    self.pop(key)
        # self.evaluated = False  # would become an option entry

    def init(self, dict_or_str, val=None, warn=True):
        """initialize one or several options.

        Arguments
        ---------
            `dict_or_str`
                a dictionary if ``val is None``, otherwise a key.
                If `val` is provided, `dict_or_str` must be a valid key.
            `val`
                value for key

        Details
        -------
        Only known keys are accepted. Known keys are in `CMAOptions.defaults()`.

        """
        # dic = dict_or_key if val is None else {dict_or_key: val}
        dic = dict_or_str
        if val is not None:
            dic = {dict_or_str: val}

        for key, val in list(dic.items()):
            if key not in CMAOptions.defaults():
                # TODO: find a better solution?
                if warn:
                    print('Warning in cma.CMAOptions.init(): key ' +
                          str(key) + ' ignored')
            else:
                self[key] = val

        return self

    def set(self, dic, val=None, warn=True):
        """assign versatile options from `CMAOptions.versatileOptions()`
        with a new value, use `init()` for the others.

        Arguments
        ---------
            `dic`
                either a dictionary or a key. In the latter
                case, `val` must be provided
            `val`
                value for key
            `warn`
                bool, print a warning if the option cannot be changed
                and is therefore omitted

        This method will most probably be used with the ``opts`` attribute of
        a `CMAEvolutionStrategy` instance.

        """
        if val is not None:  # dic is a key in this case
            dic = {dic: val}  # compose a dictionary
        for key, val in list(dic.items()):
            if key in CMAOptions.versatileOptions():
                self[key] = val
            elif warn:
                print('Warning in cma.CMAOptions.set(): key ' + str(key) + ' ignored')
        return self  # to allow o = CMAOptions(o).set(new)
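
    # typical usage sketch (an illustration, assuming an instance `es`):
    #   es.opts.set('verb_disp', 10)              # versatile (' #v '), accepted
    #   es.opts.set({'ftarget': 1e-9, 'maxfevals': 1e5})
    #   es.opts.set('popsize', 20)                # not versatile, warned and ignored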

    def complement(self):
        """add all missing options with their default values"""

        for key in CMAOptions.defaults():
            if key not in self:
                self[key] = CMAOptions.defaults()[key]
        return self

    def settable(self):
        """return the subset of those options that are settable at any
        time.

        Settable options are in `versatileOptions()`, but the
        list might be incomplete.

        """
        return CMAOptions([i for i in list(self.items())
                           if i[0] in CMAOptions.versatileOptions()])

    def __call__(self, key, default=None, loc=None):
        """evaluate and return the value of option `key` on the fly, or
        return those options whose name or description contains `key`,
        case disregarded.

        Details
        -------
        Keys that contain `filename` are not evaluated.
        For ``loc is None``, `self` is used as environment,
        but this does not define `N`.

        :See: `eval()`, `evalall()`

        """
        try:
            val = self[key]
        except:
            return self.match(key)

        if loc is None:
            loc = self  # TODO: this hack is not so useful: popsize could be there, but N is missing
        try:
            if isinstance(val, basestring):
                val = val.split('#')[0].strip()  # remove comments
                if isinstance(val, basestring) and key.find('filename') < 0 and key.find('mindx') < 0:
                    val = eval(val, globals(), loc)
            # invoke default
            # TODO: val in ... fails with array type, because it is applied element wise!
            # elif val in (None, (), [], {}) and default is not None:
            elif val is None and default is not None:
                val = eval(str(default), globals(), loc)
        except:
            pass  # slightly optimistic: the previous is bug-free
        return val

    def eval(self, key, default=None, loc=None):
        """Evaluates and sets the specified option value in
        environment `loc`. Many options need `N` to be defined in
        `loc`, some need `popsize`.

        Details
        -------
        Keys that contain 'filename' are not evaluated.
        If `loc` is None, the self-dict is used as environment.

        :See: `evalall()`, `__call__`

        """
        # TODO: try: loc['dim'] = loc['N'] etc
        self[key] = self(key, default, loc)
        return self[key]

    def evalall(self, loc=None, defaults=None):
        """Evaluates all option values in environment `loc`.

        :See: `eval()`

        """
        if defaults is None:
            defaults = cma_default_options
        # TODO: this needs rather the parameter N instead of loc
        if 'N' in loc:  # TODO: __init__ of CMA can be simplified
            popsize = self('popsize', defaults['popsize'], loc)
            for k in list(self.keys()):
                self.eval(k, defaults[k],
                          {'N': loc['N'], 'popsize': popsize})
        return self
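
    # sketch: ``CMAOptions().evalall({'N': 20})`` replaces all string defaults
    # by their numeric values for dimension 20; the `_CMAParameters` doctest
    # below passes exactly this expression.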

    def match(self, s=''):
        """return all options that match, in the name or the description,
        with string `s`, case is disregarded.

        Example: ``cma.CMAOptions().match('verb')`` returns the verbosity options.

        """
        match = s.lower()
        res = {}
        for k in sorted(self):
            s = str(k) + "='" + str(self[k]) + "'"
            if match in s.lower():
                res[k] = self[k]
        return CMAOptions(res)

    def pp(self):
        pprint(self)

    def pprint(self, linebreak=80):
        for i in sorted(self.items()):
            s = str(i[0]) + "='" + str(i[1]) + "'"
            a = s.split(' ')

            # print s in chunks
            l = ''  # start entirely to the left
            while a:
                while a and len(l) + len(a[0]) < linebreak:
                    l += ' ' + a.pop(0)
                print(l)
                l = '        '  # tab for subsequent lines
    print_ = pprint  # Python style to prevent clash with keywords
    printme = pprint

# ____________________________________________________________
# ____________________________________________________________
class CMAStopDict(dict):
    """keep and update a termination condition dictionary, which is
    "usually" empty and returned by `CMAEvolutionStrategy.stop()`.
    The class methods entirely depend on `CMAEvolutionStrategy` class
    attributes.

    Details
    -------
    This class is not relevant for the end-user and could be a nested
    class, but nested classes cannot be serialized.

    Example
    -------
    >>> import cma
    >>> sd = cma.CMAStopDict()
    >>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'verbose':-1})
    >>> print(sd(es))
    {}
    >>> es.optimize(cma.fcts.sphere, verb_disp=0)
    >>> print(sd(es))
    {'tolfun': 1e-11}
    >>> assert sd(es) == es.stop()

    :See: `OOOptimizer.stop()`, `CMAEvolutionStrategy.stop()`

    """
    def __init__(self, d={}):
        update = isinstance(d, CMAEvolutionStrategy)
        inherit = isinstance(d, CMAStopDict)
        super(CMAStopDict, self).__init__({} if update else d)
        self.stoplist = d.stoplist if inherit else []  # multiple entries possible
        self.lastiter = d.lastiter if inherit else 0  # probably not necessary
        if update:
            self._update(d)

    def __call__(self, es=None):
        """update and return the termination conditions dictionary

        """
        if es is None and self.es is None:
            raise ValueError('termination conditions need an optimizer to act upon')
        self._update(es)
        return self

    def _update(self, es):
        """Test termination criteria and update dictionary

        """
        if es is None:
            es = self.es
        assert es is not None
        if es.countiter == self.lastiter:
            if es.countiter == 0:
                self.__init__()
                return self
            try:
                if es == self.es:
                    return self
            except:  # self.es not yet assigned
                pass

        self.lastiter = es.countiter
        self.es = es

        self.stoplist = []

        N = es.N
        opts = es.opts
        self.opts = opts  # a hack to get _addstop going

        # fitness: generic criterion, user defined w/o default
        self._addstop('ftarget',
                      es.best.f < opts['ftarget'])
        # maxiter, maxfevals: generic criteria
        self._addstop('maxfevals',
                      es.countevals - 1 >= opts['maxfevals'])
        self._addstop('maxiter',
                      es.countiter >= opts['maxiter'])
        # tolx, tolfacupx: generic criteria
        # tolfun, tolfunhist (CEC: tolfun includes hist)
        self._addstop('tolx',
                      all([es.sigma * xi < opts['tolx'] for xi in es.pc]) and
                      all([es.sigma * xi < opts['tolx'] for xi in sqrt(es.dC)]))
        self._addstop('tolfacupx',
                      any([es.sigma * sig > es.sigma0 * opts['tolfacupx']
                           for sig in sqrt(es.dC)]))
        self._addstop('tolfun',
                      es.fit.fit[-1] - es.fit.fit[0] < opts['tolfun'] and
                      max(es.fit.hist) - min(es.fit.hist) < opts['tolfun'])
        self._addstop('tolfunhist',
                      len(es.fit.hist) > 9 and
                      max(es.fit.hist) - min(es.fit.hist) < opts['tolfunhist'])

        # worst seen false positive: table N=80, lam=80, getting worse for fevals=35e3 \approx 50 * N**1.5
        # but the median is not so much getting worse
        # / 5 reflects the sparsity of histbest/median
        # / 2 reflects the left and right part to be compared
        l = int(max((opts['tolstagnation'] / 5. / 2, len(es.fit.histbest) / 10)))
        # TODO: why max(..., len(histbest)/10) ???
        # TODO: the problem in the beginning is only with best ==> ???
        # equality should handle flat fitness
        self._addstop('tolstagnation',  # leads sometimes to an early stop on ftablet, fcigtab, N>=50?
                      1 < 3 and opts['tolstagnation'] and es.countiter > N * (5 + 100 / es.popsize) and
                      len(es.fit.histbest) > 100 and 2 * l < len(es.fit.histbest) and
                      np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2 * l]) and
                      np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2 * l]))
        # note: stagnation termination can prevent finding the optimum

        self._addstop('tolupsigma', opts['tolupsigma'] and
                      es.sigma / es.sigma0 / np.max(es.D) > opts['tolupsigma'])

        if 1 < 3:
            # non-user defined, method specific
            # noeffectaxis (CEC: 0.1sigma), noeffectcoord (CEC: 0.2sigma), conditioncov
            self._addstop('noeffectcoord',
                          any([es.mean[i] == es.mean[i] + 0.2 * es.sigma * sqrt(es.dC[i])
                               for i in xrange(N)]))
            if opts['CMA_diagonal'] is not True and es.countiter > opts['CMA_diagonal']:
                i = es.countiter % N
                self._addstop('noeffectaxis',
                              sum(es.mean == es.mean + 0.1 * es.sigma * es.D[i] * es.B[:, i]) == N)
            self._addstop('conditioncov',
                          es.D[-1] > 1e7 * es.D[0], 1e14)  # TODO

            self._addstop('callback', es.callbackstop)  # termination_callback
        if len(self):
            self._addstop('flat fitness: please (re)consider how to compute the fitness more elaborately',
                          len(es.fit.hist) > 9 and
                          max(es.fit.hist) == min(es.fit.hist))
        return self

    def _addstop(self, key, cond, val=None):
        if cond:
            self.stoplist.append(key)  # can have the same key twice
            self[key] = self.opts.get(key, None)

    def clear(self):
        for k in list(self):
            self.pop(k)
        self.stoplist = []

# ____________________________________________________________
# ____________________________________________________________
class _CMAParameters(object):
    """strategy parameters like population size and learning rates.

    Note:
    contrary to `CMAOptions`, `_CMAParameters` is not (yet) part of the
    "user-interface" and subject to future changes (it might become
    a `collections.namedtuple`)

    Example
    -------
    >>> import cma
    >>> es = cma.CMAEvolutionStrategy(20 * [0.1], 1)
    (6_w,12)-CMA-ES (mu_w=3.7,w_1=40%) in dimension 20 (seed=504519190)  # the seed is "random" by default
    >>>
    >>> type(es.sp)  # sp contains the strategy parameters
    <class 'cma._CMAParameters'>
    >>>
    >>> es.sp.disp()
    {'CMA_on': True,
     'N': 20,
     'c1': 0.004181139918745593,
     'c1_sep': 0.034327992810300939,
     'cc': 0.17176721127681213,
     'cc_sep': 0.25259494835857677,
     'cmean': 1.0,
     'cmu': 0.0085149624979034746,
     'cmu_sep': 0.057796356229390715,
     'cs': 0.21434997799189287,
     'damps': 1.2143499779918929,
     'mu': 6,
     'mu_f': 6.0,
     'mueff': 3.7294589343030671,
     'popsize': 12,
     'rankmualpha': 0.3,
     'weights': array([ 0.40240294,  0.25338908,  0.16622156,  0.10437523,  0.05640348,
            0.01720771])}
    >>>
    >> es.sp == cma._CMAParameters(20, 12, cma.CMAOptions().evalall({'N': 20}))
    True

    :See: `CMAOptions`, `CMAEvolutionStrategy`

    """
    def __init__(self, N, opts, ccovfac=1, verbose=True):
        """Compute strategy parameters, mainly depending on
        dimension and population size, by calling `set`

        """
        self.N = N
        if ccovfac == 1:
            ccovfac = opts['CMA_on']  # that's a hack
        self.popsize = None  # declaring the attribute, not necessary though
        self.set(opts, ccovfac=ccovfac, verbose=verbose)

    def set(self, opts, popsize=None, ccovfac=1, verbose=True):
        """Compute strategy parameters as a function
        of dimension and population size """

        alpha_cc = 1.0  # cc-correction for mueff, was zero before

        def cone(df, mu, N, alphacov=2.0):
            """rank-one update learning rate, ``df`` is disregarded and obsolete, reduce alphacov on noisy problems, say to 0.5"""
            return alphacov / ((N + 1.3)**2 + mu)

        def cmu(df, mu, alphamu=0.0, alphacov=2.0):
            """rank-mu learning rate, disregarding the constraint cmu <= 1 - cone"""
            c = alphacov * (alphamu + mu - 2 + 1 / mu) / ((N + 2)**2 + alphacov * mu / 2)
            # c = alphacov * (alphamu + mu - 2 + 1/mu) / (2 * (N + 2)**1.5 + alphacov * mu / 2)
            # print 'cmu =', c
            return c

        def conedf(df, mu, N):
            """used for computing separable learning rate"""
            return 1. / (df + 2. * sqrt(df) + float(mu) / N)

        def cmudf(df, mu, alphamu):
            """used for computing separable learning rate"""
            return (alphamu + mu - 2. + 1. / mu) / (df + 4. * sqrt(df) + mu / 2.)
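
        # quick numeric sketch (illustrative, rounded): for N = 20 and
        # mueff about 3.7 as in the class doctest above, cone(None, 3.7, 20)
        # = 2 / (21.3**2 + 3.7) is about 4.4e-3, the same order of magnitude
        # as the c1 value displayed there.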

        sp = self
        N = sp.N
        if popsize:
            opts.evalall({'N': N, 'popsize': popsize})
        else:
            popsize = opts.evalall({'N': N})['popsize']  # the default popsize is computed in CMAOptions()
        sp.popsize = popsize
        if opts['CMA_mirrors'] < 0.5:
            sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)
        elif opts['CMA_mirrors'] > 1:
            sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])
        else:
            sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29)  # 0.158650... * popsize is optimal
            # lam = arange(2, 22)
            # mirr = 0.16 + 0.29/lam
            # print(lam); print([int(0.5 + l) for l in mirr * lam])
            # [ 2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21]
            # [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]

        sp.mu_f = sp.popsize / 2.0  # float value of mu
        if opts['CMA_mu'] is not None:
            sp.mu_f = opts['CMA_mu']
        sp.mu = int(sp.mu_f + 0.499999)  # round down for x.5
        # in principle we have mu_opt = popsize/2 + lam_mirr/2,
        # which means in particular weights should only be negative for q > 0.5+mirr_frac/2
        if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1:
            print("WARNING: pairwise selection is not implemented, therefore " +
                  " mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias" % (
                      sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))
        if sp.lam_mirr > sp.popsize // 2:
            raise _Error("fraction of mirrors in the population as read from option CMA_mirrors cannot be larger than 0.5, " +
                         "theoretically optimal is 0.159")
        sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu))
        sp.weights /= sum(sp.weights)
        sp.mueff = 1 / sum(sp.weights**2)
        sp.cs = (sp.mueff + 2) / (N + sp.mueff + 3)
        # TODO: clean up (here the cumulation constant is shorter if sigma_vec is used)
        sp.dampsvec = opts['CMA_dampsvec_fac'] * (N + 2) if opts['CMA_dampsvec_fac'] else np.Inf
        sp.dampsvec_fading = opts['CMA_dampsvec_fade']
        if np.isfinite(sp.dampsvec):
            sp.cs = ((sp.mueff + 2) / (N + sp.mueff + 3))**0.5
        # sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1)
        sp.cc = (4 + alpha_cc * sp.mueff / N) / (N + 4 + alpha_cc * 2 * sp.mueff / N)
        sp.cc_sep = (1 + 1 / N + alpha_cc * sp.mueff / N) / (N**0.5 + 1 / N + alpha_cc * 2 * sp.mueff / N)  # \not\gg\cc
        sp.rankmualpha = opts['CMA_rankmualpha']
        # sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3)
        sp.c1 = ccovfac * min(1, sp.popsize / 6) * cone((N**2 + N) / 2, sp.mueff, N)  # 2. / ((N+1.3)**2 + sp.mucov)
        sp.c1_sep = ccovfac * conedf(N, sp.mueff, N)
        if opts['CMA_rankmu'] != 0:  # also empty
            sp.cmu = min(1 - sp.c1, ccovfac * cmu((N**2 + N) / 2, sp.mueff, sp.rankmualpha))
            sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha))
        else:
            sp.cmu = sp.cmu_sep = 0

        sp.neg = _BlancClass()
        if opts['CMA_active']:
            # in principle we have mu_opt = popsize/2 + lam_mirr/2,
            # which means in particular weights should only be negative for q > 0.5+mirr_frac/2
            if 1 < 3:  # seems most natural: continuation of log(lambda/2) - log(k)
                sp.neg.mu_f = popsize // 2  # not sure anymore what this is good for
                sp.neg.weights = array([log(k) - log(popsize/2 + 1/2) for k in np.arange(np.ceil(popsize/2 + 1.1/2), popsize + .1)])
            sp.neg.mu = len(sp.neg.weights)
            sp.neg.weights /= sum(sp.neg.weights)
            sp.neg.mueff = 1 / sum(sp.neg.weights**2)
            sp.neg.cmuexp = opts['CMA_activefac'] * 0.5 * sp.neg.mueff / ((N + 2)**1.5 + 1.0 * sp.neg.mueff)
            # reasoning on learning rate cmuexp: with sum |w| == 1 and
            # length-normalized vectors in the update, the residual
            # variance in any direction exceeds exp(-N*cmuexp)
            assert sp.neg.mu >= sp.lam_mirr  # not really necessary
            # sp.neg.minresidualvariance = 0.66  # not in use, keep at least 0.66 in all directions, small popsize is most critical
        else:
            sp.neg.cmuexp = 0

        sp.CMA_on = sp.c1 + sp.cmu > 0
        # print(sp.c1_sep / sp.cc_sep)

        if not opts['CMA_on'] and opts['CMA_on'] not in (None, [], (), ''):
            sp.CMA_on = False
            # sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0
        mueff_exponent = 0.5
        if 1 < 3:
            mueff_exponent = opts['CSA_damp_mueff_exponent']
        # TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA
        sp.damps = opts['CSA_dampfac'] * (0.5 +
                                          0.5 * min([1, (sp.lam_mirr / (0.159 * sp.popsize) - 1)**2])**1 +
                                          2 * max([0, ((sp.mueff - 1) / (N + 1))**mueff_exponent - 1]) + sp.cs
                                          )
        sp.cmean = float(opts['CMA_cmean'])
        # sp.kappa = 1  # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate
        #               # in larger dim it does, 15-D with defaults, kappa=8 factor 2
        if sp.cmean != 1:
            print('  cmean = %f' % (sp.cmean))

        if verbose:
            if not sp.CMA_on:
                print('covariance matrix adaptation turned off')
            if opts['CMA_mu'] is not None:
                print('mu = %f' % (sp.mu_f))

        # return self  # the constructor returns itself

    def disp(self):
        pprint(self.__dict__)


def fmin(objective_function, x0, sigma0,
         options=None,
         args=(),
         restarts=0,
         restart_from_best='False',
         incpopsize=2,
         eval_initial_x=False,
         noise_handler=None,
         noise_change_sigma_exponent=1,
         noise_kappa_exponent=0  # TODO: add max kappa value as parameter
         ):
    """functional interface to the stochastic optimizer CMA-ES
    for non-convex function minimization.

    Calling Sequences
    =================
    ``fmin(objective_function, x0, sigma0)``
        minimizes `objective_function` starting at `x0` and with standard deviation
        `sigma0` (step-size)
    ``fmin(objective_function, x0, sigma0, options={'ftarget': 1e-5})``
        minimizes `objective_function` up to target function value 1e-5, which
        is typically useful for benchmarking.
    ``fmin(objective_function, x0, sigma0, args=('f',))``
        minimizes `objective_function` called with an additional argument ``'f'``.
    ``fmin(objective_function, x0, sigma0, options={'ftarget':1e-5, 'popsize':40})``
        uses additional options ``ftarget`` and ``popsize``
    ``fmin(objective_function, esobj, None, options={'maxfevals': 1e5})``
        uses the `CMAEvolutionStrategy` object instance `esobj` to optimize
        `objective_function`, similar to `esobj.optimize()`.

    Arguments
    =========
    `objective_function`
        function to be minimized. Called as ``objective_function(x, *args)``.
        `x` is a one-dimensional `numpy.ndarray`. `objective_function`
        can return `numpy.NaN`,
        which is interpreted as outright rejection of solution `x`
        and invokes an immediate resampling and (re-)evaluation
        of a new solution not counting as function evaluation.
    `x0`
        list or `numpy.ndarray`, initial guess of minimum solution
        before the application of the geno-phenotype transformation
        according to the ``transformation`` option. Otherwise
        `x0` can also be a `cma.CMAEvolutionStrategy` object instance.
        In the latter case `sigma0` can be ``None``.
    `sigma0`
        scalar, initial standard deviation in each coordinate.
        `sigma0` should be about 1/4th of the search domain width (where the
        optimum is to be expected). The variables in `objective_function`
        should be scaled such that they presumably have similar sensitivity.
        See also option `scaling_of_variables`.
    `options`
        a dictionary with additional options passed to the constructor
        of class ``CMAEvolutionStrategy``, see ``cma.CMAOptions()`` for
        a list of available options.
    ``args=()``
        arguments to be used to call the `objective_function`
    ``restarts=0``
        number of restarts
    ``restart_from_best=False``
        which point to restart from
    ``incpopsize=2``
        multiplier for increasing the population size `popsize` before each restart
    ``eval_initial_x=False``
        evaluate initial solution
    ``noise_handler=None``
        a `NoiseHandler` class instance or ``None``
    ``noise_change_sigma_exponent=1``
        exponent for sigma increment for additional noise treatment
    ``noise_kappa_exponent=0``
        instead of applying reevaluations, the "number of evaluations"
        is (ab)used as scaling factor kappa (experimental).

    Optional Arguments
    ==================
    All values in the `options` dictionary are evaluated if they are of
    type `str`, besides `verb_filenameprefix`, see class `CMAOptions` for details.
    The full list is available in ``cma.cma_default_options``.

    >>> import cma
    >>> cma.CMAOptions()

    Subsets of options can be displayed, for example like ``cma.CMAOptions('tol')``,
    or ``cma.CMAOptions('bound')``, see also class `CMAOptions`.

    Return
    ======
    Similar to `OOOptimizer.optimize()` and/or `CMAEvolutionStrategy.optimize()`, return the
    list provided by `CMAEvolutionStrategy.result()` appended with an `OOOptimizer` and a
    `BaseDataLogger`::

        res = es.result() + (es.stop(), es, logger)

    where
        - ``res[0]`` (``xopt``) -- best evaluated solution
        - ``res[1]`` (``fopt``) -- respective function value
        - ``res[2]`` (``evalsopt``) -- respective number of function evaluations
        - ``res[3]`` (``evals``) -- number of overall conducted objective function evaluations
        - ``res[4]`` (``iterations``) -- number of overall conducted iterations
        - ``res[5]`` (``xmean``) -- mean of the final sample distribution
        - ``res[6]`` (``stds``) -- effective stds of the final sample distribution
        - ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary
        - ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance
        - ``res[-1]`` (``logger``) -- class `CMADataLogger` instance

    Details
    =======
    This function is an interface to the class `CMAEvolutionStrategy`. The
    latter class should be used when full control over the iteration loop
    of the optimizer is desired.

    The noise handling follows closely [Hansen et al 2009, A Method for Handling
    Uncertainty in Evolutionary Optimization...] in the measurement part, but the
    implemented treatment is slightly different: for ``noiseS > 0``, ``evaluations``
    (time) and sigma are increased by ``alpha``. For ``noiseS < 0``, ``evaluations``
    (time) is decreased by ``alpha**(1/4)``. The option ``noise_handling`` switches
    the noise handling on/off, the given value defines the maximal number
    of evaluations for a single fitness computation. If ``noise_handling`` is a list,
    the smallest element defines the minimal number and if the list has three elements,
    the median value is the start value for ``evaluations``. See also class
    `NoiseHandler`.

    Examples
    ========
    The following example calls `fmin` optimizing the Rosenbrock function
    in 10-D with initial solution 0.1 and initial step-size 0.5. The
    options are specified for the usage with the `doctest` module.

    >>> import cma
    >>> # cma.CMAOptions()  # returns all possible options
    >>> options = {'CMA_diagonal': 100, 'seed': 1234, 'verb_time': 0}
    >>>
    >>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, options)
    (5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234)
       Covariance matrix is diagonal for 10 iterations (1/ccov=29.0)
    Iterat #Fevals   function value    axis ratio  sigma  minstd maxstd min:sec
        1      10 1.264232686260072e+02 1.1e+00 4.40e-01  4e-01  4e-01
        2      20 1.023929748193649e+02 1.1e+00 4.00e-01  4e-01  4e-01
        3      30 1.214724267489674e+02 1.2e+00 3.70e-01  3e-01  4e-01
      100    1000 6.366683525319511e+00 6.2e+00 2.49e-02  9e-03  3e-02
      200    2000 3.347312410388666e+00 1.2e+01 4.52e-02  8e-03  4e-02
      300    3000 1.027509686232270e+00 1.3e+01 2.85e-02  5e-03  2e-02
      400    4000 1.279649321170636e-01 2.3e+01 3.53e-02  3e-03  3e-02
      500    5000 4.302636076186532e-04 4.6e+01 4.78e-03  3e-04  5e-03
      600    6000 6.943669235595049e-11 5.1e+01 5.41e-06  1e-07  4e-06
      650    6500 5.557961334063003e-14 5.4e+01 1.88e-07  4e-09  1e-07
    termination on tolfun : 1e-11
    final/bestever f-value = 5.55796133406e-14 2.62435631419e-14
    mean solution:  [ 1.          1.00000001  1.          1.
        1.          1.00000001  1.00000002  1.00000003 ...]
    std deviation: [ 3.9193387e-09  3.7792732e-09  4.0062285e-09  4.6605925e-09
        5.4966188e-09  7.4377745e-09  1.3797207e-08  2.6020765e-08 ...]
    >>>
    >>> print('best solutions fitness = %f' % (res[1]))
    best solutions fitness = 2.62435631419e-14
    >>> assert res[1] < 1e-12

    The above call is pretty much equivalent with the slightly more
    verbose call ::

        res = cma.CMAEvolutionStrategy([0.1] * 10, 0.5,
                                       options=options).optimize(cma.fcts.rosen)

    In either case, the method ::

        cma.plot();

    (based on `matplotlib.pyplot`) produces a plot of the run and, if
    necessary::

        cma.show()

    shows the plot in a window. To continue you might need to
    close the pop-up window. This behavior seems to disappear in
    subsequent calls of `cma.plot()` and is avoided by using
    `ipython` with `-pylab` option. Finally ::

        cma.savefig('myfirstrun')  # savefig from matplotlib.pyplot

    will save the figure in a png.

    :See: `CMAEvolutionStrategy`, `OOOptimizer.optimize()`, `plot()`,
        `CMAOptions`, `scipy.optimize.fmin()`

    """  # style guides say there should be the above empty line
    if 1 < 3:  # try: # pass on KeyboardInterrupt
        if not objective_function:  # return available options in a dictionary
            return CMAOptions()  # these opts are by definition valid

        fmin_options = locals().copy()  # archive original options
        del fmin_options['objective_function']
        del fmin_options['x0']
        del fmin_options['sigma0']
        del fmin_options['options']
        del fmin_options['args']

        if options is None:
            options = cma_default_options
        opts = CMAOptions(options.copy()).complement()

        irun = 0
        best = BestSolution()
        while True:  # restart loop
            # recover from a CMA object
            if irun == 0 and isinstance(x0, CMAEvolutionStrategy):
                es = x0
                x0 = es.inputargs['x0']  # for the next restarts
                if sigma0 is None or not np.isscalar(array(sigma0)):
                    sigma0 = es.inputargs['sigma0']  # for the next restarts
                # ignore further input args and keep original options
            else:  # default case
                if irun and eval(str(fmin_options['restart_from_best'])):
                    print('CAVE: restart_from_best is often not useful')
                    es = CMAEvolutionStrategy(best.x, sigma0, opts)
                else:
                    es = CMAEvolutionStrategy(x0, sigma0, opts)
                if eval_initial_x:
                    x = es.gp.pheno(es.mean, into_bounds=es.boundary_handler.repair, archive=es.sent_solutions)
                    es.best.update([x], es.sent_solutions, [objective_function(x, *args)], 1)
                    es.countevals += 1

            opts = es.opts  # processed options, unambiguous
            # a hack:
            fmin_opts = CMAOptions(fmin_options.copy(), unchecked=True)
            for k in fmin_opts:
                # locals() cannot be modified directly, exec won't work in 3.x, therefore
                fmin_opts.eval(k, loc={'N': es.N, 'popsize': opts['popsize']})

            append = opts['verb_append'] or es.countiter > 0 or irun > 0
            # es.logger is "the same" logger, because the "identity" is only determined by the `filenameprefix`
            logger = CMADataLogger(opts['verb_filenameprefix'], opts['verb_log'])
            logger.register(es, append).add()  # initial values, not fitness values

            # if es.countiter == 0 and es.opts['verb_log'] > 0 and not es.opts['verb_append']:
            #     logger = CMADataLogger(es.opts['verb_filenameprefix']).register(es)
            #     logger.add()
            # es.writeOutput()  # initial values for sigma etc

            if 1 < 3:
                if noise_handler:
                    noisehandler = noise_handler
                    noise_handling = True
                else:
                    noisehandler = NoiseHandler(es.N, 0)
                    noise_handling = False
                es.noise_handler = noisehandler

            # the problem: this assumes that good solutions cannot take longer than bad ones:
            # with EvalInParallel(objective_function, 2, is_feasible=opts['is_feasible']) as eval_in_parallel:
            if 1 < 3:
                while not es.stop():  # iteration loop
                    # X, fit = eval_in_parallel(lambda: es.ask(1)[0], es.popsize, args, repetitions=noisehandler.evaluations-1)
                    X, fit = es.ask_and_eval(objective_function, args,
                                             evaluations=noisehandler.evaluations,
                                             aggregation=np.median)  # treats NaN with resampling
                    # TODO: check args and in case use args=(noisehandler.evaluations, )

                    es.tell(X, fit)  # prepare for next iteration
                    if noise_handling:  # it would be better to also use these f-evaluations in tell
                        es.sigma *= noisehandler(X, fit, objective_function, es.ask,
                                                 args=args)**fmin_opts['noise_change_sigma_exponent']
                        es.countevals += noisehandler.evaluations_just_done  # TODO: this is a hack, not important though
                        if 1 < 3:
                            es.sp.cmean *= exp(-noise_kappa_exponent * np.tanh(noisehandler.noiseS))
                            if es.sp.cmean > 1:
                                es.sp.cmean = 1

                    es.disp()
                    logger.add(more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],
                               modulo=1 if es.stop() and logger.modulo else None)
                    if (opts['verb_log'] and opts['verb_plot'] and
                            (es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop())):
                        logger.plot(324, fontsize=10)

            # end while not es.stop
            mean_pheno = es.gp.pheno(es.mean, into_bounds=es.boundary_handler.repair, archive=es.sent_solutions)
            fmean = objective_function(mean_pheno, *args)
            es.countevals += 1

            es.best.update([mean_pheno], es.sent_solutions, [fmean], es.countevals)
            best.update(es.best, es.sent_solutions)  # in restarted case

            # final message
            if opts['verb_disp']:
                es.result_pretty(irun, time.asctime(time.localtime()))

            irun += 1
            if irun > fmin_opts['restarts'] or 'ftarget' in es.stopdict or 'maxfevals' in es.stopdict:
                break
            opts['verb_append'] = es.countevals
            opts['popsize'] = fmin_opts['incpopsize'] * es.sp.popsize  # TODO: use rather options?
            opts['seed'] += 1

        # while irun

        es.out['best'] = best  # TODO: this is a rather suboptimal type for inspection in the shell
        if 1 < 3:
            return es.result() + (es.stop(), es, logger)

        else:  # previously: to be removed
            return (best.x.copy(), best.f, es.countevals,
                    dict((('stopdict', CMAStopDict(es.stopdict))
                          , ('mean', es.gp.pheno(es.mean))
                          , ('std', es.sigma * sqrt(es.dC) * es.gp.scales)
                          , ('out', es.out)
                          , ('opts', es.opts)  # last state of options
                          , ('cma', es)
                          , ('inputargs', es.inputargs)
                          ))
                    )
        # TODO refine output, can #args be flexible?
        # is this well usable as it is now?
    else:  # except KeyboardInterrupt:  # Exception, e:
        if eval(str(options['verb_disp'])) > 0:
            print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')
        raise  # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit

# _____________________________________________________________________
# _____________________________________________________________________
#
class BaseDataLogger(object):
    """"abstract" base class for a data logger that can be used with an `OOOptimizer`

    Details: attribute `modulo` is used in ``OOOptimizer.optimize``

    """
    def add(self, optim=None, more_data=[]):
        """abstract method, add a "data point" from the state of `optim` into the
        logger, the argument `optim` can be omitted if it was `register()`-ed before,
        acts like an event handler"""
        raise NotImplementedError()

    def register(self, optim):
        """abstract method, register an optimizer `optim`, only needed if `add()` is
        called without a value for the `optim` argument"""
        self.optim = optim

    def disp(self):
        """display some data trace (not implemented)"""
        print('method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self)))

    def plot(self):
        """plot data (not implemented)"""
        print('method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self)))

    def data(self):
        """return logged data in a dictionary (not implemented)"""
        print('method BaseDataLogger.data() is not implemented, to be done in subclass ' + str(type(self)))
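
    # A minimal subclass sketch (an illustration, not part of the original
    # module), logging iteration count and best f-value into a list:
    #
    #   class ListLogger(BaseDataLogger):
    #       def __init__(self):
    #           self.data_ = []
    #       def add(self, optim=None, more_data=[]):
    #           es = optim if optim is not None else self.optim
    #           self.data_.append((es.countiter, es.best.f))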

# _____________________________________________________________________
# _____________________________________________________________________
#
class CMADataLogger(BaseDataLogger):
    """data logger for class `CMAEvolutionStrategy`. The logger is
    identified by its name prefix and (over-)writes or reads according
    data files. Therefore, the logger must be considered as *global* variable
    with unpredictable side effects, if two loggers with the same name
    and on the same working folder are used at the same time.

    Examples
    ========
    ::

        import cma
        es = cma.CMAEvolutionStrategy(...)
        logger = cma.CMADataLogger().register(es)
        while not es.stop():
            ...
            logger.add()  # add can also take an argument

        logger.plot()  # or a short cut can be used:
        cma.plot()  # plot data from logger with default name


        logger2 = cma.CMADataLogger('just_another_filename_prefix').load()
        logger2.plot()
        logger2.disp()

    ::

        import cma
        from matplotlib.pylab import *
        res = cma.fmin(cma.Fcts.sphere, rand(10), 1e-0)
        logger = res[-1]  # the CMADataLogger
        logger.load()  # by "default" data are on disk
        semilogy(logger.f[:, 0], logger.f[:, 5])  # plot f versus iteration, see file header
        show()

    Details
    =======
    After loading data, the logger has the attributes `xmean`, `xrecent`, `std`, `f`, and `D`,
    corresponding to the ``xmean``, ``xrecentbest``, ``stddev``, ``fit``, and ``axlen`` filename
    trails.

    :See: `disp()`, `plot()`

    """
    default_prefix = 'outcmaes'
    # names = ('axlen', 'fit', 'stddev', 'xmean', 'xrecentbest')
    # key_names_with_annotation = ('std', 'xmean', 'xrecent')

    def __init__(self, name_prefix=default_prefix, modulo=1, append=False):
        """initialize logging of data from a `CMAEvolutionStrategy` instance,
        default ``modulo=1`` means logging with each call

        """
        # super(CMAData, self).__init__({'iter': [], 'stds': [], 'D': [], 'sig': [], 'fit': [], 'xm': []})
        # class properties:
        self.file_names = ('axlen', 'fit', 'stddev', 'xmean', 'xrecentbest')  # used in load, however hard-coded in add
        self.key_names = ('D', 'f', 'std', 'xmean', 'xrecent')  # used in load, however hard-coded in plot
        self.key_names_with_annotation = ('std', 'xmean', 'xrecent')  # used in load
        self.modulo = modulo  # allows calling with None
        self.append = append
        self.counter = 0  # number of calls of add, should the initial value depend on `append`?
        self.last_iteration = 0
        self.name_prefix = name_prefix if name_prefix else CMADataLogger.default_prefix
        if isinstance(self.name_prefix, CMAEvolutionStrategy):
            self.name_prefix = self.name_prefix.opts.eval('verb_filenameprefix')
        self.registered = False

    def register(self, es, append=None, modulo=None):
        """register a `CMAEvolutionStrategy` instance for logging,
        ``append=True`` appends to previous data logged under the same name,
        by default previous data are overwritten.

        """
        if not isinstance(es, CMAEvolutionStrategy):
            raise TypeError("only class CMAEvolutionStrategy can be registered for logging")
        self.es = es
        if append is not None:
            self.append = append
        if modulo is not None:
            self.modulo = modulo
        self.registered = True
        return self

    def initialize(self, modulo=None):
        """reset logger, overwrite original files, `modulo`: log only every modulo call"""
        if modulo is not None:
            self.modulo = modulo
        try:
            es = self.es  # must have been registered
        except AttributeError:
            pass  # TODO: revise usage of es... that this can pass
            raise _Error('call register() before initialize()')

        self.counter = 0  # number of calls of add
        self.last_iteration = 0  # some lines are only written if iteration > last_iteration

        # write headers for output
        fn = self.name_prefix + 'fit.dat'
        strseedtime = 'seed=%d, %s' % (es.opts['seed'], time.asctime())

        try:
            with open(fn, 'w') as f:
                f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' +
                        'bestever, best, median, worst objective function value, ' +
                        'further objective values of best", ' +
                        strseedtime +
                        # strftime("%Y/%m/%d %H:%M:%S", localtime())  # just asctime() would do
                        '\n')
        except (IOError, OSError):
            print('could not open file ' + fn)

        fn = self.name_prefix + 'axlen.dat'
        try:
            with open(fn, 'w') as f:
                f.write('% columns="iteration, evaluation, sigma, max axis length, ' +
                        ' min axis length, all principal axes lengths ' +
                        ' (sorted square roots of eigenvalues of C)", ' +
                        strseedtime +
                        '\n')
        except (IOError, OSError):
            print('could not open file ' + fn)

        fn = self.name_prefix + 'stddev.dat'
        try:
            with open(fn, 'w') as f:
                f.write('% # columns=["iteration, evaluation, sigma, void, void, ' +
                        ' stds==sigma*sqrt(diag(C))", ' +
                        strseedtime +
                        '\n')
        except (IOError, OSError):
            print('could not open file ' + fn)

        fn = self.name_prefix + 'xmean.dat'
        try:
            with open(fn, 'w') as f:
                f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' +
                        strseedtime)
                f.write(' # scaling_of_variables: ')
                if np.size(es.gp.scales) > 1:
                    f.write(' '.join(map(str, es.gp.scales)))
                else:
                    f.write(str(es.gp.scales))
                f.write(', typical_x: ')
                if np.size(es.gp.typical_x) > 1:
                    f.write(' '.join(map(str, es.gp.typical_x)))
                else:
                    f.write(str(es.gp.typical_x))
                f.write('\n')
        except (IOError, OSError):
            print('could not open/write file ' + fn)

        fn = self.name_prefix + 'xrecentbest.dat'
        try:
            with open(fn, 'w') as f:
                f.write('% # iter+eval+sigma+0+fitness+xbest, ' +
                        strseedtime +
                        '\n')
        except (IOError, OSError):
            print('could not open/write file ' + fn)

        return self
    # end def initialize

    def load(self, filenameprefix=None):
        """load data from files written and return a data dictionary, *not*
        a prerequisite for using `plot()` or `disp()`.

        Argument `filenameprefix` is the filename prefix of data to be loaded (five files),
        by default ``'outcmaes'``.

        Return data dictionary with keys `xrecent`, `xmean`, `f`, `D`, `std`

        """
        if not filenameprefix:
            filenameprefix = self.name_prefix
        for i in rglen(self.file_names):
            fn = filenameprefix + self.file_names[i] + '.dat'
            try:
                self.__dict__[self.key_names[i]] = _fileToMatrix(fn)
            except:
                print('WARNING: reading from file "' + fn + '" failed')
            if self.key_names[i] in self.key_names_with_annotation:
                self.__dict__[self.key_names[i]].append(self.__dict__[self.key_names[i]][-1])  # copy last row to later fill in annotation position for display
            self.__dict__[self.key_names[i]] = array(self.__dict__[self.key_names[i]], copy=False)
        return self
4842
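# Hedged usage sketch for `load` (column layout follows the headers
# written by `initialize`; assumes the 'outcmaes*' files already exist):
#
#     logger = CMADataLogger().load()
#     print(logger.f[-1, 0])   # last logged iteration
#     print(logger.f[-1, 5])   # best objective function value there
#     print(logger.D[-1, 5:])  # sorted principal axis lengths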
4843 - def add(self, es=None, more_data=[], modulo=None): # TODO: find a different way to communicate current x and f
4844 """append some logging data from `CMAEvolutionStrategy` class instance `es`,
if ``number_of_times_called % modulo`` equals zero, never if ``modulo==0``.
4846
4847 The sequence ``more_data`` must always have the same length.
4848
4849 When used for a different optimizer class, this function can be
4850 (easily?) adapted by changing the assignments under INTERFACE
in the implementation.
4852
4853 """
4854 mod = modulo if modulo is not None else self.modulo
4855 self.counter += 1
4856 if mod == 0 or (self.counter > 3 and (self.counter - 1) % mod):
4857 return
4858 if es is None:
4859 try:
4860 es = self.es # must have been registered
except AttributeError:
4862 raise _Error('call `add` with argument `es` or ``register(es)`` before ``add()``')
4863 elif not self.registered:
4864 self.register(es)
4865
4866 if 1 < 3:
4867 if self.counter == 1 and not self.append and self.modulo != 0:
4868 self.initialize() # write file headers
4869 self.counter = 1
4870
4871 # --- INTERFACE, can be changed if necessary ---
4872 if not isinstance(es, CMAEvolutionStrategy): # not necessary
4873 print('WARNING: <type \'CMAEvolutionStrategy\'> expected, found '
4874 + str(type(es)) + ' in method CMADataLogger.add')
4875 evals = es.countevals
4876 iteration = es.countiter
4877 sigma = es.sigma
4878 axratio = es.D.max() / es.D.min()
xmean = es.mean  # TODO: should this optionally be the phenotype?
4880 fmean_noise_free = es.fmean_noise_free
4881 fmean = es.fmean
4882 try:
4883 besteverf = es.best.f
4884 bestf = es.fit.fit[0]
4885 worstf = es.fit.fit[-1]
4886 medianf = es.fit.fit[es.sp.popsize // 2]
4887 except:
4888 if iteration > 0: # first call without f-values is OK
4889 raise
4890 try:
4891 xrecent = es.best.last.x
4892 except:
4893 xrecent = None
4894 maxD = es.D.max()
4895 minD = es.D.min()
4896 diagD = es.D
4897 diagC = es.sigma * es.sigma_vec * sqrt(es.dC)
4898 more_to_write = es.more_to_write
4899 es.more_to_write = []
4900 # --- end interface ---
4901
4902 try:
4903 # fit
4904 if iteration > self.last_iteration:
4905 fn = self.name_prefix + 'fit.dat'
4906 with open(fn, 'a') as f:
4907 f.write(str(iteration) + ' '
4908 + str(evals) + ' '
4909 + str(sigma) + ' '
4910 + str(axratio) + ' '
4911 + str(besteverf) + ' '
4912 + '%.16e' % bestf + ' '
4913 + str(medianf) + ' '
4914 + str(worstf) + ' '
4915 # + str(es.sp.popsize) + ' '
4916 # + str(10**es.noiseS) + ' '
4917 # + str(es.sp.cmean) + ' '
4918 + ' '.join(str(i) for i in more_to_write) + ' '
4919 + ' '.join(str(i) for i in more_data) + ' '
4920 + '\n')
4921 # axlen
4922 fn = self.name_prefix + 'axlen.dat'
4923 with open(fn, 'a') as f: # does not rely on reference counting
4924 f.write(str(iteration) + ' '
4925 + str(evals) + ' '
4926 + str(sigma) + ' '
4927 + str(maxD) + ' '
4928 + str(minD) + ' '
4929 + ' '.join(map(str, diagD))
4930 + '\n')
4931 # stddev
4932 fn = self.name_prefix + 'stddev.dat'
4933 with open(fn, 'a') as f:
4934 f.write(str(iteration) + ' '
4935 + str(evals) + ' '
4936 + str(sigma) + ' '
4937 + '0 0 '
4938 + ' '.join(map(str, diagC))
4939 + '\n')
4940 # xmean
4941 fn = self.name_prefix + 'xmean.dat'
4942 with open(fn, 'a') as f:
4943 f.write(str(iteration) + ' '
4944 + str(evals) + ' '
4945 # + str(sigma) + ' '
4946 + '0 '
4947 + str(fmean_noise_free) + ' '
4948 + str(fmean) + ' ' # TODO: this does not make sense
# TODO: should this optionally be the phenotype?
4950 + ' '.join(map(str, xmean))
4951 + '\n')
4952 # xrecent
4953 fn = self.name_prefix + 'xrecentbest.dat'
4954 if iteration > 0 and xrecent is not None:
4955 with open(fn, 'a') as f:
4956 f.write(str(iteration) + ' '
4957 + str(evals) + ' '
4958 + str(sigma) + ' '
4959 + '0 '
4960 + str(bestf) + ' '
4961 + ' '.join(map(str, xrecent))
4962 + '\n')
4963
4964 except (IOError, OSError):
4965 if iteration <= 1:
4966 print('could not open/write file')
4967 self.last_iteration = iteration
4968
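# Hedged sketch of the intended register/add cycle (mirrors the class
# docstring; `felli` is the test function defined elsewhere in this module):
#
#     es = CMAEvolutionStrategy(8 * [0.5], 0.3)
#     logger = CMADataLogger().register(es)
#     while not es.stop():
#         X = es.ask()
#         es.tell(X, [felli(x) for x in X])
#         logger.add()  # no argument needed, `es` was registered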
4969 - def closefig(self):
4970 pyplot.close(self.fighandle)
4971
4972 - def save(self, nameprefix, switch=False):
4973 """saves logger data to a different set of files, for
4974 ``switch=True`` also the loggers name prefix is switched to
4975 the new value
4976
4977 """
4978 if not nameprefix or not isinstance(nameprefix, basestring):
4979 raise _Error('filename prefix must be a nonempty string')
4980
4981 if nameprefix == self.default_prefix:
raise _Error('cannot save to default name "' + nameprefix + '...", choose another name')
4983
4984 if nameprefix == self.name_prefix:
4985 return
4986
for name in CMADataLogger.names:
    with open(self.name_prefix + name + '.dat') as f_in, \
            open(nameprefix + name + '.dat', 'w') as f_out:
        f_out.write(f_in.read())
4989
4990 if switch:
4991 self.name_prefix = nameprefix
4992
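# Design note: `save` duplicates the data files by content. An equivalent
# sketch using the standard library (hypothetical alternative):
#
#     import shutil
#     for name in CMADataLogger.names:
#         shutil.copyfile(self.name_prefix + name + '.dat',
#                         nameprefix + name + '.dat')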
4993 - def plot(self, fig=None, iabscissa=1, iteridx=None,
4994 plot_mean=False, # was: plot_mean=True
4995 foffset=1e-19, x_opt=None, fontsize=10):
4996 """
4997 plot data from a `CMADataLogger` (using the files written by the logger).
4998
4999 Arguments
5000 ---------
5001 `fig`
5002 figure number, by default 325
5003 `iabscissa`
5004 ``0==plot`` versus iteration count,
5005 ``1==plot`` versus function evaluation number
5006 `iteridx`
5007 iteration indices to plot
5008
5009 Return `CMADataLogger` itself.
5010
5011 Examples
5012 --------
5013 ::
5014
5015 import cma
5016 logger = cma.CMADataLogger() # with default name
5017 # try to plot the "default logging" data (e.g.
5018 # from previous fmin calls, which is essentially what
5019 # also cma.plot() does)
5020 logger.plot()
5021 cma.savefig('fig325.png') # save current figure
5022 logger.closefig()
5023
Dependencies: matplotlib/pyplot.
5025
5026 """
5027 dat = self.load(self.name_prefix)
5028 try:
# pyplot: procedural interface for matplotlib
5030 from matplotlib.pyplot import figure, subplot, semilogy, hold, plot, grid, \
5031 axis, title, text, xlabel, isinteractive, gcf
5032
except ImportError:
    print('could not find matplotlib.pyplot module, function plot() is not available')
    return
5036
5037 if fontsize and pyplot.rcParams['font.size'] != fontsize:
5038 print('global variable pyplot.rcParams[\'font.size\'] set (from ' +
5039 str(pyplot.rcParams['font.size']) + ') to ' + str(fontsize))
pyplot.rcParams['font.size'] = fontsize  # subtracted in the end, but return can happen in between
5041
5042 if fig:
5043 figure(fig)
5044 else:
5045 figure(325)
5046 # show() # should not be necessary
5047 self.fighandle = gcf() # fighandle.number
5048
5049 if iabscissa not in (0, 1):
5050 iabscissa = 1
5051
5052 # interactive_status = matplotlib.is_interactive()
5053 pyplot.ioff() # prevents immediate drawing, much faster
5054
dat.x = dat.xmean  # this is the genotype
5056 if not plot_mean:
5057 if len(dat.x) < 2:
print('not enough data to plot recent x, plotting mean instead')
5059 else:
5060 dat.x = dat.xrecent
5061
5062 if iteridx is not None:
5063 dat.f = dat.f[np.where([x in iteridx for x in dat.f[:, 0]])[0], :]
5064 dat.D = dat.D[np.where([x in iteridx for x in dat.D[:, 0]])[0], :]
5065 iteridx.append(dat.x[-1, 1]) # last entry is artificial
5066 dat.x = dat.x[np.where([x in iteridx for x in dat.x[:, 0]])[0], :]
5067 dat.std = dat.std[np.where([x in iteridx for x in dat.std[:, 0]])[0], :]
5068
5069 if iabscissa == 0:
5070 xlab = 'iterations'
5071 elif iabscissa == 1:
5072 xlab = 'function evaluations'
5073
5074 # use fake last entry in x and std for line extension-annotation
5075 if dat.x.shape[1] < 100:
5076 minxend = int(1.06 * dat.x[-2, iabscissa])
5077 # write y-values for individual annotation into dat.x
5078 dat.x[-1, iabscissa] = minxend # TODO: should be ax[1]
5079 idx = np.argsort(dat.x[-2, 5:])
5080 idx2 = np.argsort(idx)
5081 if x_opt is None:
5082 dat.x[-1, 5 + idx] = np.linspace(np.min(dat.x[:, 5:]),
5083 np.max(dat.x[:, 5:]), dat.x.shape[1] - 5)
5084 else:
5085 dat.x[-1, 5 + idx] = np.logspace(np.log10(np.min(abs(dat.x[:, 5:]))),
5086 np.log10(np.max(abs(dat.x[:, 5:]))), dat.x.shape[1] - 5)
5087 else:
5088 minxend = 0
5089
5090 if len(dat.f) == 0:
5091 print('nothing to plot')
5092 return
5093
5094 # not in use anymore, see formatter above
5095 # xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
5096
5097 # dfit(dfit<1e-98) = NaN;
5098
5099 # TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
5100
5101 subplot(2, 2, 1)
5102 self.plotdivers(dat, iabscissa, foffset)
5103
5104 # TODO: modularize also the remaining subplots
5105 subplot(2, 2, 2)
5106 hold(False)
if x_opt is not None:  # TODO: differentiate neg and pos?
5108 semilogy(dat.x[:, iabscissa], abs(dat.x[:, 5:]) - x_opt, '-')
5109 else:
5110 plot(dat.x[:, iabscissa], dat.x[:, 5:], '-')
5111 hold(True)
5112 grid(True)
5113 ax = array(axis())
5114 # ax[1] = max(minxend, ax[1])
5115 axis(ax)
5116 ax[1] -= 1e-6
5117 if dat.x.shape[1] < 100:
5118 yy = np.linspace(ax[2] + 1e-6, ax[3] - 1e-6, dat.x.shape[1] - 5)
5119 # yyl = np.sort(dat.x[-1,5:])
5120 idx = np.argsort(dat.x[-1, 5:])
5121 idx2 = np.argsort(idx)
5122 if x_opt is not None:
5123 semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1, 5:]), yy[idx2]], 'k-') # line from last data point
5124 semilogy(np.dot(dat.x[-2, iabscissa], [1, 1]), array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-')
5125 else:
5126 # plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point
5127 plot(np.dot(dat.x[-2, iabscissa], [1, 1]), array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-')
5128 # plot(array([dat.x[-1, iabscissa], ax[1]]),
5129 # reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k')
5130 for i in range(len(idx)):
5131 # TODOqqq: annotate phenotypic value!?
5132 # text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]]))
5133 text(dat.x[-1, iabscissa], dat.x[-1, 5 + i], 'x(' + str(i) + ')=' + str(dat.x[-2, 5 + i]))
5134
5135 i = 2 # find smallest i where iteration count differs (in case the same row appears twice)
5136 while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
5137 i += 1
5138 title('Object Variables (' + ('mean' if plot_mean else 'curr best') +
5139 ', ' + str(dat.x.shape[1] - 5) + '-D, popsize~' +
5140 (str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
5141 if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
5142 + ')')
5143 # pyplot.xticks(xticklocs)
5144
5145 # Scaling
5146 subplot(2, 2, 3)
5147 hold(False)
5148 semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
5149 hold(True)
5150 grid(True)
5151 ax = array(axis())
5152 # ax[1] = max(minxend, ax[1])
5153 axis(ax)
5154 title('Scaling (All Main Axes)')
5155 # pyplot.xticks(xticklocs)
5156 xlabel(xlab)
5157
5158 # standard deviations
5159 subplot(2, 2, 4)
5160 hold(False)
# remove sigma from stds (graphs become much more readable)
5162 dat.std[:, 5:] = np.transpose(dat.std[:, 5:].T / dat.std[:, 2].T)
5163 # ax = array(axis())
5164 # ax[1] = max(minxend, ax[1])
5165 # axis(ax)
5166 if 1 < 2 and dat.std.shape[1] < 100:
5167 # use fake last entry in x and std for line extension-annotation
5168 minxend = int(1.06 * dat.x[-2, iabscissa])
5169 dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
5170 idx = np.argsort(dat.std[-2, 5:])
5171 idx2 = np.argsort(idx)
5172 dat.std[-1, 5 + idx] = np.logspace(np.log10(np.min(dat.std[:, 5:])),
5173 np.log10(np.max(dat.std[:, 5:])), dat.std.shape[1] - 5)
5174
5175 dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
5176 yy = np.logspace(np.log10(ax[2]), np.log10(ax[3]), dat.std.shape[1] - 5)
5177 # yyl = np.sort(dat.std[-1,5:])
5178 idx = np.argsort(dat.std[-1, 5:])
5179 idx2 = np.argsort(idx)
5180 # plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-') # vertical separator
5181 # vertical separator
5182 plot(np.dot(dat.std[-2, iabscissa], [1, 1]), array([np.min(dat.std[-2, 5:]), np.max(dat.std[-2, 5:])]), 'k-')
5183 hold(True)
5184 # plot([dat.std[-1, iabscissa], ax[1]], [dat.std[-1,5:], yy[idx2]], 'k-') # line from last data point
for i in rglen(idx):
5186 # text(ax[1], yy[i], ' '+str(idx[i]))
5187 text(dat.std[-1, iabscissa], dat.std[-1, 5 + i], ' ' + str(i))
5188 semilogy(dat.std[:, iabscissa], dat.std[:, 5:], '-')
5189 grid(True)
5190 title('Standard Deviations in All Coordinates')
5191 # pyplot.xticks(xticklocs)
5192 xlabel(xlab)
5193 pyplot.ion()
5194 pyplot.draw() # update "screen"
5195 pyplot.show() # show figure
5196 # matplotlib.interactive(interactive_status)
5197
5198 return self
5199
5200 # ____________________________________________________________
5201 # ____________________________________________________________
5202 #
5203 @staticmethod
5204 - def plotdivers(dat, iabscissa, foffset):
"""helper function for `plot()` that plots everything that belongs
in the upper left subplot: fitness, sigma, etc.
5207
5208 Arguments
5209 ---------
5210 `iabscissa` in ``(0,1)``
0==versus iterations, 1==versus fevals
5212 `foffset`
5213 offset to fitness for log-plot
5214
5215 :See: `plot()`
5216
5217 """
5218 from matplotlib.pyplot import semilogy, hold, grid, \
5219 axis, title, text
5220 fontsize = pyplot.rcParams['font.size']
5221
5222 # interactive_status = matplotlib.is_interactive()
5223 pyplot.ioff() # prevents immediate drawing
5224 hold(False)
5225
5226 dfit = dat.f[:, 5] - min(dat.f[:, 5])
5227 dfit[dfit < 1e-98] = np.NaN
5228
5229 if dat.f.shape[1] > 7:
5230 # semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k')
5231 semilogy(dat.f[:, iabscissa], abs(dat.f[:, [6, 7]]) + foffset, '-k')
5232 hold(True)
5233
5234 # (larger indices): additional fitness data, for example constraints values
5235 if dat.f.shape[1] > 8:
5236 # dd = abs(dat.f[:,7:]) + 10*foffset
5237 # dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be
5238 semilogy(dat.f[:, iabscissa], np.abs(dat.f[:, 8:]) + 10 * foffset, 'm')
5239 hold(True)
5240
5241 idx = np.where(dat.f[:, 5] > 1e-98)[0] # positive values
5242 semilogy(dat.f[idx, iabscissa], dat.f[idx, 5] + foffset, '.b')
5243 hold(True)
5244 grid(True)
5245
5246 idx = np.where(dat.f[:, 5] < -1e-98)[0] # negative values
5247 semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset, '.r')
5248
5249 semilogy(dat.f[:, iabscissa], abs(dat.f[:, 5]) + foffset, '-b')
5250 idx = np.isfinite(dfit)
5251 semilogy(dat.f[:, iabscissa][idx], dfit[idx], '-c')
5252
5253 # overall minimum
5254 i = np.argmin(dat.f[:, 5])
5255 semilogy(dat.f[i, iabscissa] * np.ones(2), dat.f[i, 5] * np.ones(2), 'rd')
5256 # semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd')
5257
5258 # AR and sigma
5259 semilogy(dat.f[:, iabscissa], dat.f[:, 3], '-r') # AR
5260 semilogy(dat.f[:, iabscissa], dat.f[:, 2], '-g') # sigma
5261 semilogy(dat.std[:-1, iabscissa], np.vstack([list(map(max, dat.std[:-1, 5:])), list(map(min, dat.std[:-1, 5:]))]).T,
5262 '-m', linewidth=2)
5263 text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std', fontsize=fontsize)
5264 text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std', fontsize=fontsize)
5265 ax = array(axis())
5266 # ax[1] = max(minxend, ax[1])
5267 axis(ax)
5268 text(ax[0] + 0.01, ax[2], # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))),
5269 '.f_recent=' + repr(dat.f[-1, 5]))
5270
5271 # title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)')
5272 title('blue:abs(f), cyan:f-min(f), green:sigma, red:axis ratio', fontsize=fontsize - 1)
5273 # pyplot.xticks(xticklocs)
5274 pyplot.ion()
5275 pyplot.draw() # update "screen"
5276 pyplot.show() # show figure
5277 # matplotlib.interactive(interactive_status)
5278
5279
5280 - def downsampling(self, factor=10, first=3, switch=True, verbose=True):
5281 """
crude downsampling of a `CMADataLogger` data file by `factor`, keeping
also the first `first` entries. This function is a stub and subject
5284 to future changes. Return self.
5285
5286 Arguments
5287 ---------
5288 - `factor` -- downsampling factor
5289 - `first` -- keep first `first` entries
5290 - `switch` -- switch the new logger to the downsampled logger original_name+'down'
5291
5292 Details
5293 -------
5294 ``self.name_prefix+'down'`` files are written
5295
5296 Example
5297 -------
5298 ::
5299
5300 import cma
cma.CMADataLogger().downsampling()  # takes outcmaes* files
5302 cma.plot('outcmaesdown')
5303
5304 """
5305 newprefix = self.name_prefix + 'down'
5306 for name in self.file_names:
with open(newprefix + name + '.dat', 'w') as f:
    iline = 0
    cwritten = 0
    for line in open(self.name_prefix + name + '.dat'):
        if iline < first or iline % factor == 0:
            f.write(line)
            cwritten += 1
        iline += 1
5316 if verbose and iline > first:
5317 print('%d' % (cwritten) + ' lines written in ' + newprefix + name + '.dat')
5318 if switch:
5319 self.name_prefix += 'down'
5320 return self
5321
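# Hedged usage sketch: thin out large data files before plotting.
#
#     logger = CMADataLogger('outcmaes').downsampling(factor=10)
#     logger.plot()  # reads the new 'outcmaesdown*' files, since switch=True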
5322 # ____________________________________________________________
5323 # ____________________________________________________________
5324 #
5325 - def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):
5326 """displays selected data from (files written by) the class `CMADataLogger`.
5327
5328 Arguments
5329 ---------
5330 `idx`
5331 indices corresponding to rows in the data file;
5332 if idx is a scalar (int), the first two, then every idx-th,
5333 and the last three rows are displayed. Too large index values are removed.
5334
5335 Example
5336 -------
5337 >>> import cma, numpy as np
5338 >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data
5339 >>> assert res[1] < 1e-9
5340 >>> assert res[2] < 4400
5341 >>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data
5342 >>> l.disp([0,-1]) # first and last
5343 >>> l.disp(20) # some first/last and every 20-th line
5344 >>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last
5345 >>> l.disp(np.r_[0, -10:0]) # first and ten last
>>> cma.disp(l.name_prefix, np.r_[0::100, -10:0]) # the same as l.disp(...)
5347
5348 Details
5349 -------
5350 The data line with the best f-value is displayed as last line.
5351
5352 :See: `disp()`
5353
5354 """
5355
5356 filenameprefix = self.name_prefix
5357
5358 def printdatarow(dat, iteration):
5359 """print data of iteration i"""
5360 i = np.where(dat.f[:, 0] == iteration)[0][0]
5361 j = np.where(dat.std[:, 0] == iteration)[0][0]
5362 print('%5d' % (int(dat.f[i, 0])) + ' %6d' % (int(dat.f[i, 1])) + ' %.14e' % (dat.f[i, 5]) +
5363 ' %5.1e' % (dat.f[i, 3]) +
5364 ' %6.2e' % (max(dat.std[j, 5:])) + ' %6.2e' % min(dat.std[j, 5:]))
5365
5366 dat = CMADataLogger(filenameprefix).load()
5367 ndata = dat.f.shape[0]
5368
5369 # map index to iteration number, is difficult if not all iteration numbers exist
5370 # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long
5371 # otherwise:
5372 if idx is None:
5373 idx = 100
5374 if np.isscalar(idx):
5375 # idx = np.arange(0, ndata, idx)
5376 if idx:
5377 idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0]
5378 else:
5379 idx = np.r_[0, 1, -3:0]
5380
5381 idx = array(idx)
5382 idx = idx[idx < ndata]
5383 idx = idx[-idx <= ndata]
5384 iters = dat.f[idx, 0]
5385 idxbest = np.argmin(dat.f[:, 5])
5386 iterbest = dat.f[idxbest, 0]
5387
5388 if len(iters) == 1:
5389 printdatarow(dat, iters[0])
5390 else:
5391 self.disp_header()
5392 for i in iters:
5393 printdatarow(dat, i)
5394 self.disp_header()
5395 printdatarow(dat, iterbest)
5396 sys.stdout.flush()
5397 - def disp_header(self):
5398 heading = 'Iterat Nfevals function value axis ratio maxstd minstd'
5399 print(heading)
5400
5401 # end class CMADataLogger
5402
5403 # ____________________________________________________________
5404 # ____________________________________________________________
5405 #
5406 # _____________________________________________________________________
5407 # _____________________________________________________________________
5408 #
5409 -class DEAPCMADataLogger(BaseDataLogger):
5410 """data logger for class `deap.cma.Strategy` of the DEAP library.
5411 Documentation is not entirely adapted to the deap case.
5412
5413 The logger is identified by its name prefix and writes or reads according
5414 data files.
5415
5416 Examples
5417 ========
5418 ::
5419
import cma, deap.cma
5421 es = deap.cma.Strategy(...)
5422 data = cma.DEAPCMADataLogger().register(es)
5423 while not es.stop():
5424 ...
5425 data.add(fitness_values) # add can also take `es` as additional argument
5426
5427 data.plot() # or a short cut can be used:
5428 cma.plot() # plot data from logger with default name
5429
5430
5431 data2 = cma.DEAPCMADataLogger(another_filename_prefix).load()
5432 data2.plot()
5433 data2.disp()
5434
5435 ::
5436
5437 import cma
from pylab import *  # provides rand, semilogy, show
5439 res = cma.fmin(cma.Fcts.sphere, rand(10), 1e-0)
5440 dat = res[-1] # the CMADataLogger
5441 dat.load() # by "default" data are on disk
5442 semilogy(dat.f[:,0], dat.f[:,5]) # plot f versus iteration, see file header
5443 show()
5444
5445 Details
5446 =======
5447 After loading data, the logger has the attributes `xmean`, `xrecent`, `std`, `f`, and `D`,
5448 corresponding to xmean, xrecentbest, stddev, fit, and axlen filename trails.
5449
5450 :See: `disp()`, `plot()`
5451
5452 """
5453 default_prefix = 'outcmaes'
5454 names = ('axlen', 'fit', 'stddev', 'xmean') # ,'xrecentbest')
5455 key_names_with_annotation = ('std', 'xmean')
5456
5457 - def __init__(self, name_prefix=default_prefix, modulo=1, append=False):
5458 """initialize logging of data from a `CMAEvolutionStrategy` instance,
the default ``modulo=1`` means logging with each call
5460
5461 """
5462 # super(CMAData, self).__init__({'iter':[], 'stds':[], 'D':[], 'sig':[], 'fit':[], 'xm':[]})
5463 # class properties:
5464 self.counter = 0 # number of calls of add
5465 self.best_fitness = np.inf
5466 self.modulo = modulo # allows calling with None
5467 self.append = append
5468 self.name_prefix = name_prefix if name_prefix else CMADataLogger.default_prefix
5469 if isinstance(self.name_prefix, CMAEvolutionStrategy):
5470 self.name_prefix = self.name_prefix.opts.eval('verb_filenameprefix')
5471 self.registered = False
5472
5473 - def register(self, es, append=None, modulo=None):
5474 """register a `CMAEvolutionStrategy` instance for logging,
5475 ``append=True`` appends to previous data logged under the same name,
5476 by default previous data are overwritten.
5477
5478 """
5479 self.es = es
5480 if append is not None:
5481 self.append = append
5482 if modulo is not None:
5483 self.modulo = modulo
5484 if not self.append and self.modulo != 0:
5485 self.initialize() # write file headers
5486 self.registered = True
5487 return self
5488
5489 - def initialize(self, modulo=None):
5490 """reset logger, overwrite original files, `modulo`: log only every modulo call"""
5491 if modulo is not None:
5492 self.modulo = modulo
try:
    self.es  # must have been registered
except AttributeError:
    raise _Error('call register() before initialize()')
5498
5499 # write headers for output
5500 fn = self.name_prefix + 'fit.dat'
5501 if 1 < 3:
strseedtime = 'seed=unknown, %s' % (time.asctime())
5503
5504 try:
5505 with open(fn, 'w') as f:
5506 f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' +
5507 'bestever, best, median, worst objective function value, ' +
5508 'further objective values of best", ' +
5509 strseedtime +
5510 # strftime("%Y/%m/%d %H:%M:%S", localtime()) + # just asctime() would do
5511 '\n')
5512 except (IOError, OSError):
5513 print('could not open file ' + fn)
5514
fn = self.name_prefix + 'axlen.dat'
try:
    with open(fn, 'w') as f:
        f.write('% columns="iteration, evaluation, sigma, max axis length, ' +
                ' min axis length, all principal axes lengths ' +
                ' (sorted square roots of eigenvalues of C)", ' +
                strseedtime +
                '\n')
except (IOError, OSError):
    print('could not open file ' + fn)
fn = self.name_prefix + 'stddev.dat'
try:
    with open(fn, 'w') as f:
        f.write('% # columns=["iteration, evaluation, sigma, void, void, ' +
                ' stds==sigma*sqrt(diag(C))", ' +
                strseedtime +
                '\n')
except (IOError, OSError):
    print('could not open file ' + fn)
5540
5541 fn = self.name_prefix + 'xmean.dat'
5542 try:
5543 with open(fn, 'w') as f:
5544 f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' +
5545 strseedtime)
5546 f.write('\n')
5548 except (IOError, OSError):
5549 print('could not open/write file ' + fn)
5550
5551 return self
# end def initialize
5553
5554 - def load(self, filenameprefix=None):
5555 """loads data from files written and return a data dictionary, *not*
5556 a prerequisite for using `plot()` or `disp()`.
5557
Argument `filenameprefix` is the filename prefix of data to be loaded (four files),
5559 by default ``'outcmaes'``.
5560
Return data dictionary with keys `xmean`, `f`, `D`, `std` (`xrecent`
is not written by this class)
5562
5563 """
5564 if not filenameprefix:
5565 filenameprefix = self.name_prefix
5566 dat = self # historical
5567 # dat.xrecent = _fileToMatrix(filenameprefix + 'xrecentbest.dat')
5568 dat.xmean = _fileToMatrix(filenameprefix + 'xmean.dat')
5569 dat.std = _fileToMatrix(filenameprefix + 'stddev' + '.dat')
5570 # a hack to later write something into the last entry
5571 for key in ['xmean', 'std']: # 'xrecent',
5572 dat.__dict__[key].append(dat.__dict__[key][-1]) # copy last row to later fill in annotation position for display
5573 dat.__dict__[key] = array(dat.__dict__[key], copy=False)
5574 dat.f = array(_fileToMatrix(filenameprefix + 'fit.dat'))
5575 dat.D = array(_fileToMatrix(filenameprefix + 'axlen' + '.dat'))
5576 return dat
5577
5578
5579 - def add(self, fitness_values, es=None, more_data=[], modulo=None): # TODO: find a different way to communicate current x and f
5580 """append some logging data from `CMAEvolutionStrategy` class instance `es`,
if ``number_of_times_called % modulo`` equals zero, never if ``modulo==0``.
5582
5583 The sequence ``more_data`` must always have the same length.
5584
5585 """
5586 self.counter += 1
5587 fitness_values = np.sort(fitness_values)
5588 if fitness_values[0] < self.best_fitness:
5589 self.best_fitness = fitness_values[0]
5590 mod = modulo if modulo is not None else self.modulo
5591 if mod == 0 or (self.counter > 3 and self.counter % mod):
5592 return
5593 if es is None:
5594 try:
5595 es = self.es # must have been registered
except AttributeError:
5597 raise _Error('call register() before add() or add(es)')
5598 elif not self.registered:
5599 self.register(es)
5600
5601 try:
5602 # fit
5603 if es.update_count > 0:
5604 # fit = es.fit.fit[0] # TODO: where do we get the fitness from?
5605 fn = self.name_prefix + 'fit.dat'
5606 with open(fn, 'a') as f:
5607 f.write(str(es.update_count) + ' '
5608 + str(es.update_count * es.lambda_) + ' '
5609 + str(es.sigma) + ' '
5610 + str(es.diagD[-1] / es.diagD[0]) + ' '
5611 + str(self.best_fitness) + ' '
5612 + '%.16e' % fitness_values[0] + ' '
5613 + str(fitness_values[es.lambda_ // 2]) + ' '
5614 + str(fitness_values[-1]) + ' '
5615 # + str(es.sp.popsize) + ' '
5616 # + str(10**es.noiseS) + ' '
5617 # + str(es.sp.cmean) + ' '
5618 # + ' '.join(str(i) for i in es.more_to_write)
5619 + ' '.join(str(i) for i in more_data)
5620 + '\n')
5621 # es.more_to_write = []
5622 # axlen
5623 fn = self.name_prefix + 'axlen.dat'
5624 with open(fn, 'a') as f: # does not rely on reference counting
5625 f.write(str(es.update_count) + ' '
5626 + str(es.update_count * es.lambda_) + ' '
5627 + str(es.sigma) + ' '
5628 + str(es.diagD[-1]) + ' '
5629 + str(es.diagD[0]) + ' '
5630 + ' '.join(map(str, es.diagD))
5631 + '\n')
5632 # stddev
5633 fn = self.name_prefix + 'stddev.dat'
5634 with open(fn, 'a') as f:
5635 f.write(str(es.update_count) + ' '
5636 + str(es.update_count * es.lambda_) + ' '
5637 + str(es.sigma) + ' '
5638 + '0 0 '
5639 + ' '.join(map(str, es.sigma * np.sqrt([es.C[i][i] for i in xrange(es.dim)])))
5640 + '\n')
5641 # xmean
5642 fn = self.name_prefix + 'xmean.dat'
5643 with open(fn, 'a') as f:
5644 if es.update_count < 1:
5645 f.write('0 0 0 0 0 '
5646 + ' '.join(map(str,
# TODO: should this optionally be the phenotype?
# es.x0
es.centroid))  # deap's Strategy stores the distribution mean as `centroid`
5650 + '\n')
5651 else:
5652 f.write(str(es.update_count) + ' '
5653 + str(es.update_count * es.lambda_) + ' '
5654 # + str(es.sigma) + ' '
5655 + '0 0 0 '
5656 # + str(es.fmean_noise_free) + ' '
5657 # + str(es.fmean) + ' ' # TODO: this does not make sense
# TODO: should this optionally be the phenotype?
5659 + ' '.join(map(str, es.centroid))
5660 + '\n')
5661 # xrecent
5662 except (IOError, OSError):
if es.update_count <= 1:  # deap's Strategy has no `countiter` attribute
5664 print('could not open/write file')
5665
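# Hedged sketch of one logging step inside a DEAP generational loop
# (`strategy`, `toolbox` and `population` are assumed to exist as in the
# usual DEAP examples; DEAP fitnesses are tuples, hence the `[0]`):
#
#     fitnesses = [toolbox.evaluate(ind)[0] for ind in population]
#     logger.add(fitnesses, strategy)  # strategy: a deap.cma.Strategy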
5666 - def closefig(self):
5667 pyplot.close(self.fighandle)
5668
5669 - def save(self, nameprefix, switch=False):
5670 """saves logger data to a different set of files, for
5671 ``switch=True`` also the loggers name prefix is switched to
5672 the new value
5673
5674 """
5675 if not nameprefix or not isinstance(nameprefix, basestring):
5676 raise _Error('filename prefix must be a nonempty string')
5677
5678 if nameprefix == self.default_prefix:
raise _Error('cannot save to default name "' + nameprefix + '...", choose another name')
5680
5681 if nameprefix == self.name_prefix:
5682 return
5683
for name in self.names:  # this class writes only its own four files
    with open(self.name_prefix + name + '.dat') as f_in, \
            open(nameprefix + name + '.dat', 'w') as f_out:
        f_out.write(f_in.read())
5686
5687 if switch:
5688 self.name_prefix = nameprefix
5689
5690 - def plot(self, fig=None, iabscissa=1, iteridx=None,
plot_mean=False,
5692 foffset=1e-19, x_opt=None, fontsize=10):
5693 """
5694 plot data from a `DEAPCMADataLogger` (using the files written by the logger).
5695
5696 Arguments
5697 ---------
5698 `fig`
5699 figure number, by default 325
5700 `iabscissa`
5701 ``0==plot`` versus iteration count,
5702 ``1==plot`` versus function evaluation number
5703 `iteridx`
5704 iteration indices to plot
5705
5706 Return `CMADataLogger` itself.
5707
5708 Examples
5709 --------
5710 ::
5711
5712 import cma
5713 logger = cma.CMADataLogger() # with default name
5714 # try to plot the "default logging" data (e.g. from previous fmin calls)
5715 logger.plot() # to continue you might need to close the pop-up window
5716 # once and call plot() again.
5717 # This behavior seems to disappear in subsequent
5718 # calls of plot(). Also using ipython with -pylab
5719 # option might help.
5720 cma.savefig('fig325.png') # save current figure
5721 logger.closefig()
5722
Dependencies: matplotlib/pyplot.
5724
5725 """
5726
5727 dat = self.load(self.name_prefix)
5728
5729 try:
# pyplot: procedural interface for matplotlib
5731 from matplotlib.pyplot import figure, ioff, ion, subplot, semilogy, hold, plot, grid, \
5732 axis, title, text, xlabel, isinteractive, draw, gcf
5733
except ImportError:
    print('could not find matplotlib.pyplot module, function plot() is not available')
    return
5737
5738 if fontsize and pyplot.rcParams['font.size'] != fontsize:
5739 print('global variable pyplot.rcParams[\'font.size\'] set (from ' +
5740 str(pyplot.rcParams['font.size']) + ') to ' + str(fontsize))
pyplot.rcParams['font.size'] = fontsize  # subtracted in the end, but return can happen in between
5742
5743 if fig:
5744 figure(fig)
5745 else:
5746 figure(325)
5747 # show() # should not be necessary
5748 self.fighandle = gcf() # fighandle.number
5749
5750 if iabscissa not in (0, 1):
5751 iabscissa = 1
5752 interactive_status = isinteractive()
5753 ioff() # prevents immediate drawing
5754
dat.x = dat.xmean  # this is the genotype
if not plot_mean:
    if not hasattr(dat, 'xrecent'):
        print('recent x are not logged by this class, plotting mean instead')
    elif len(dat.x) < 2:
        print('not enough data to plot recent x, using mean instead')
    else:
        dat.x = dat.xrecent
5761 if iteridx is not None:
5762 dat.f = dat.f[np.where([x in iteridx for x in dat.f[:, 0]])[0], :]
5763 dat.D = dat.D[np.where([x in iteridx for x in dat.D[:, 0]])[0], :]
5764 iteridx.append(dat.x[-1, 1]) # last entry is artificial
5765 dat.x = dat.x[np.where([x in iteridx for x in dat.x[:, 0]])[0], :]
5766 dat.std = dat.std[np.where([x in iteridx for x in dat.std[:, 0]])[0], :]
5767
5768 if iabscissa == 0:
5769 xlab = 'iterations'
5770 elif iabscissa == 1:
5771 xlab = 'function evaluations'
5772
5773 # use fake last entry in x and std for line extension-annotation
5774 if dat.x.shape[1] < 100:
5775 minxend = int(1.06 * dat.x[-2, iabscissa])
5776 # write y-values for individual annotation into dat.x
5777 dat.x[-1, iabscissa] = minxend # TODO: should be ax[1]
5778 idx = np.argsort(dat.x[-2, 5:])
5779 idx2 = np.argsort(idx)
5780 if x_opt is None:
5781 dat.x[-1, 5 + idx] = np.linspace(np.min(dat.x[:, 5:]),
5782 np.max(dat.x[:, 5:]), dat.x.shape[1] - 5)
5783 else:
5784 dat.x[-1, 5 + idx] = np.logspace(np.log10(np.min(abs(dat.x[:, 5:]))),
5785 np.log10(np.max(abs(dat.x[:, 5:]))), dat.x.shape[1] - 5)
5786 else:
5787 minxend = 0
5788
5789 if len(dat.f) == 0:
5790 print('nothing to plot')
5791 return
5792
5793 # not in use anymore, see formatter above
5794 # xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
5795
5796 # dfit(dfit<1e-98) = NaN;
5797
5798 ioff() # turns update off
5799
5800 # TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
5801
5802 subplot(2, 2, 1)
5803 self.plotdivers(dat, iabscissa, foffset)
5804
5805 # TODO: modularize also the remaining subplots
5806 subplot(2, 2, 2)
5807 hold(False)
if x_opt is not None:  # TODO: differentiate neg and pos?
5809 semilogy(dat.x[:, iabscissa], abs(dat.x[:, 5:]) - x_opt, '-')
5810 else:
5811 plot(dat.x[:, iabscissa], dat.x[:, 5:], '-')
5812 hold(True)
5813 grid(True)
5814 ax = array(axis())
5815 # ax[1] = max(minxend, ax[1])
5816 axis(ax)
5817 ax[1] -= 1e-6
5818 if dat.x.shape[1] < 100:
5819 yy = np.linspace(ax[2] + 1e-6, ax[3] - 1e-6, dat.x.shape[1] - 5)
5820 # yyl = np.sort(dat.x[-1,5:])
5821 idx = np.argsort(dat.x[-1, 5:])
5822 idx2 = np.argsort(idx)
5823 if x_opt is not None:
5824 semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1, 5:]), yy[idx2]], 'k-') # line from last data point
5825 semilogy(np.dot(dat.x[-2, iabscissa], [1, 1]), array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-')
5826 else:
5827 # plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point
5828 plot(np.dot(dat.x[-2, iabscissa], [1, 1]), array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-')
5829 # plot(array([dat.x[-1, iabscissa], ax[1]]),
5830 # reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k')
5831 for i in range(len(idx)):
5832 # TODOqqq: annotate phenotypic value!?
5833 # text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]]))
5834 text(dat.x[-1, iabscissa], dat.x[-1, 5 + i], 'x(' + str(i) + ')=' + str(dat.x[-2, 5 + i]))
5835
5836 i = 2 # find smallest i where iteration count differs (in case the same row appears twice)
5837 while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
5838 i += 1
5839 title('Object Variables (' + ('mean' if plot_mean else 'curr best') +
5840 ', ' + str(dat.x.shape[1] - 5) + '-D, popsize~' +
5841 (str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
5842 if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
5843 + ')')
5844 # pyplot.xticks(xticklocs)
5845
5846 # Scaling
5847 subplot(2, 2, 3)
5848 hold(False)
5849 semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
5850 hold(True)
5851 grid(True)
5852 ax = array(axis())
5853 # ax[1] = max(minxend, ax[1])
5854 axis(ax)
5855 title('Scaling (All Main Axes)')
5856 # pyplot.xticks(xticklocs)
5857 xlabel(xlab)
5858
5859 # standard deviations
5860 subplot(2, 2, 4)
5861 hold(False)
# remove sigma from stds (graphs become much more readable)
5863 dat.std[:, 5:] = np.transpose(dat.std[:, 5:].T / dat.std[:, 2].T)
5864 # ax = array(axis())
5865 # ax[1] = max(minxend, ax[1])
5866 # axis(ax)
5867 if 1 < 2 and dat.std.shape[1] < 100:
5868 # use fake last entry in x and std for line extension-annotation
5869 minxend = int(1.06 * dat.x[-2, iabscissa])
5870 dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
5871 idx = np.argsort(dat.std[-2, 5:])
5872 idx2 = np.argsort(idx)
5873 dat.std[-1, 5 + idx] = np.logspace(np.log10(np.min(dat.std[:, 5:])),
5874 np.log10(np.max(dat.std[:, 5:])), dat.std.shape[1] - 5)
5875
5876 dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
5877 yy = np.logspace(np.log10(ax[2]), np.log10(ax[3]), dat.std.shape[1] - 5)
5878 # yyl = np.sort(dat.std[-1,5:])
5879 idx = np.argsort(dat.std[-1, 5:])
5880 idx2 = np.argsort(idx)
5881 # plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-') # vertical separator
5882 # vertical separator
5883 plot(np.dot(dat.std[-2, iabscissa], [1, 1]), array([np.min(dat.std[-2, 5:]), np.max(dat.std[-2, 5:])]), 'k-')
5884 hold(True)
5885 # plot([dat.std[-1, iabscissa], ax[1]], [dat.std[-1,5:], yy[idx2]], 'k-') # line from last data point
for i in rglen(idx):
5887 # text(ax[1], yy[i], ' '+str(idx[i]))
5888 text(dat.std[-1, iabscissa], dat.std[-1, 5 + i], ' ' + str(i))
5889 semilogy(dat.std[:, iabscissa], dat.std[:, 5:], '-')
5890 grid(True)
5891 title('Standard Deviations in All Coordinates')
5892 # pyplot.xticks(xticklocs)
5893 xlabel(xlab)
5894 ion()
5895 draw() # does not suffice
5896 show()
5897
5898 return self
5899
5900
5901 # ____________________________________________________________
5902 # ____________________________________________________________
5903 #
5904 @staticmethod
5905 - def plotdivers(dat, iabscissa, foffset):
"""helper function for `plot()` that plots everything that belongs
in the upper left subplot: fitness, sigma, etc.
5908
5909 Arguments
5910 ---------
5911 `iabscissa` in ``(0,1)``
0==versus iterations, 1==versus fevals
5913 `foffset`
5914 offset to fitness for log-plot
5915
5916 :See: `plot()`
5917
5918 """
5919 from matplotlib.pyplot import semilogy, hold, grid, \
5920 axis, title, text
5921 fontsize = pyplot.rcParams['font.size']
5922
5923 hold(False)
5924
5925 dfit = dat.f[:, 5] - min(dat.f[:, 5])
5926 dfit[dfit < 1e-98] = np.NaN
5927
5928 if dat.f.shape[1] > 7:
5929 # semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k')
5930 semilogy(dat.f[:, iabscissa], abs(dat.f[:, [6, 7]]) + foffset, '-k')
5931 hold(True)
5932
5933 # (larger indices): additional fitness data, for example constraints values
5934 if dat.f.shape[1] > 8:
5935 # dd = abs(dat.f[:,7:]) + 10*foffset
5936 # dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be
5937 semilogy(dat.f[:, iabscissa], np.abs(dat.f[:, 8:]) + 10 * foffset, 'm')
5938 hold(True)
5939
5940 idx = np.where(dat.f[:, 5] > 1e-98)[0] # positive values
5941 semilogy(dat.f[idx, iabscissa], dat.f[idx, 5] + foffset, '.b')
5942 hold(True)
5943 grid(True)
5944
idx = np.where(dat.f[:, 5] < -1e-98)[0]  # negative values
5946 semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset, '.r')
5947
5948 semilogy(dat.f[:, iabscissa], abs(dat.f[:, 5]) + foffset, '-b')
5949 semilogy(dat.f[:, iabscissa], dfit, '-c')
5950
5951 # overall minimum
5952 i = np.argmin(dat.f[:, 5])
5953 semilogy(dat.f[i, iabscissa] * np.ones(2), dat.f[i, 5] * np.ones(2), 'rd')
5954 # semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd')
5955
5956 # AR and sigma
5957 semilogy(dat.f[:, iabscissa], dat.f[:, 3], '-r') # AR
5958 semilogy(dat.f[:, iabscissa], dat.f[:, 2], '-g') # sigma
5959 semilogy(dat.std[:-1, iabscissa], np.vstack([list(map(max, dat.std[:-1, 5:])), list(map(min, dat.std[:-1, 5:]))]).T,
5960 '-m', linewidth=2)
5961 text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std', fontsize=fontsize)
5962 text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std', fontsize=fontsize)
5963 ax = array(axis())
5964 # ax[1] = max(minxend, ax[1])
5965 axis(ax)
5966 text(ax[0] + 0.01, ax[2], # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))),
5967 '.f_recent=' + repr(dat.f[-1, 5]))
5968
5969 # title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)')
5970 title('blue:abs(f), cyan:f-min(f), green:sigma, red:axis ratio', fontsize=fontsize - 1)
5971 # pyplot.xticks(xticklocs)
5972
5973
5974
5975 - def downsampling(self, factor=10, first=3, switch=True):
5976 """
crude downsampling of a `CMADataLogger` data file by `factor`, keeping
also the first `first` entries. This function is a stub and subject
5979 to future changes.
5980
5981 Arguments
5982 ---------
5983 - `factor` -- downsampling factor
5984 - `first` -- keep first `first` entries
5985 - `switch` -- switch the new logger name to oldname+'down'
5986
5987 Details
5988 -------
5989 ``self.name_prefix+'down'`` files are written
5990
5991 Example
5992 -------
5993 ::
5994
5995 import cma
cma.CMADataLogger().downsampling()  # takes outcmaes* files
5997 cma.plot('outcmaesdown')
5998
5999 """
6000 newprefix = self.name_prefix + 'down'
for name in self.names:  # this class writes only its own four files
    with open(newprefix + name + '.dat', 'w') as f:
        iline = 0
        cwritten = 0
        for line in open(self.name_prefix + name + '.dat'):
            if iline < first or iline % factor == 0:
                f.write(line)
                cwritten += 1
            iline += 1
6011 print('%d' % (cwritten) + ' lines written in ' + newprefix + name + '.dat')
6012 if switch:
6013 self.name_prefix += 'down'
6014 return self
6015
6016 # ____________________________________________________________
6017 # ____________________________________________________________
6018 #
6019 - def disp_header(self):
6020 heading = 'Iterat Nfevals function value axis ratio maxstd minstd'
6021 print(heading)
6022
6023 - def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):
6024 """displays selected data from (files written by) the class `CMADataLogger`.
6025
6026 Arguments
6027 ---------
6028 `idx`
6029 indices corresponding to rows in the data file;
6030 if idx is a scalar (int), the first two, then every idx-th,
6031 and the last three rows are displayed. Too large index values are removed.
6032 If ``len(idx) == 1``, only a single row is displayed, e.g. the last
6033 entry when ``idx == [-1]``.
6034
6035 Example
6036 -------
6037 >>> import cma, numpy as np
6038 >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data
6039 >>> assert res[1] < 1e-9
6040 >>> assert res[2] < 4400
6041 >>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data
6042 >>> l.disp([0,-1]) # first and last
6043 >>> l.disp(20) # some first/last and every 20-th line
6044 >>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last
6045 >>> l.disp(np.r_[0, -10:0]) # first and ten last
>>> cma.disp(l.name_prefix, np.r_[0::100, -10:0]) # the same as l.disp(...)
6047
6048 Details
6049 -------
6050 The data line with the best f-value is displayed as last line.
6051
6052 :See: `disp()`
6053
6054 """
6055
6056 filenameprefix = self.name_prefix
6057
6058 def printdatarow(dat, iteration):
6059 """print data of iteration i"""
6060 i = np.where(dat.f[:, 0] == iteration)[0][0]
6061 j = np.where(dat.std[:, 0] == iteration)[0][0]
6062 print('%5d' % (int(dat.f[i, 0])) + ' %6d' % (int(dat.f[i, 1])) + ' %.14e' % (dat.f[i, 5]) +
6063 ' %5.1e' % (dat.f[i, 3]) +
6064 ' %6.2e' % (max(dat.std[j, 5:])) + ' %6.2e' % min(dat.std[j, 5:]))
6065
6066 dat = CMADataLogger(filenameprefix).load()
6067 ndata = dat.f.shape[0]
6068
6069 # map index to iteration number, is difficult if not all iteration numbers exist
6070 # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long
6071 # otherwise:
6072 if idx is None:
6073 idx = 100
6074 if np.isscalar(idx):
6075 # idx = np.arange(0, ndata, idx)
6076 if idx:
6077 idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0]
6078 else:
6079 idx = np.r_[0, 1, -3:0]
6080
6081 idx = array(idx)
idx = idx[idx < ndata]  # as in CMADataLogger.disp, drop indices beyond the data
6083 idx = idx[-idx <= ndata]
6084 iters = dat.f[idx, 0]
6085 idxbest = np.argmin(dat.f[:, 5])
6086 iterbest = dat.f[idxbest, 0]
6087 if len(iters) == 1:
6088 printdatarow(dat, iters[0])
6089 else:
6090 self.disp_header()
6091 for i in iters:
6092 printdatarow(dat, i)
6093 self.disp_header()
6094 printdatarow(dat, iterbest)
6095 sys.stdout.flush()
6096
6097 last_figure_number = 324
6098 -def plot(name=None, fig=None, abscissa=1, iteridx=None,
6099 plot_mean=False,
6100 foffset=1e-19, x_opt=None, fontsize=10):
6101 """
6102 plot data from files written by a `CMADataLogger`,
6103 the call ``cma.plot(name, **argsdict)`` is a shortcut for
6104 ``cma.CMADataLogger(name).plot(**argsdict)``
6105
6106 Arguments
6107 ---------
6108 `name`
6109 name of the logger, filename prefix, None evaluates to
6110 the default 'outcmaes'
6111 `fig`
6112 filename or figure number, or both as a tuple (any order)
6113 `abscissa`
6114 0==plot versus iteration count,
6115 1==plot versus function evaluation number
6116 `iteridx`
6117 iteration indices to plot
6118
6119 Return `None`
6120
6121 Examples
6122 --------
6123 ::
6124
cma.plot()  # the optimization might still be
            # running in a different shell
6127 cma.savefig('fig325.png')
6128 cma.closefig()
6129
6130 cdl = cma.CMADataLogger().downsampling().plot()
6131 # in case the file sizes are large
6132
6133 Details
6134 -------
6135 Data from codes in other languages (C, Java, Matlab, Scilab) have the same
6136 format and can be plotted just the same.
6137
6138 :See: `CMADataLogger`, `CMADataLogger.plot()`
6139
6140 """
6141 global last_figure_number
6142 if not fig:
6143 last_figure_number += 1
6144 fig = last_figure_number
6145 last_figure_number = fig
6146 CMADataLogger(name).plot(fig, abscissa, iteridx, plot_mean, foffset,
6147 x_opt, fontsize)
6148
6149 -def disp(name=None, idx=None):
6150 """displays selected data from (files written by) the class `CMADataLogger`.
6151
6152 The call ``cma.disp(name, idx)`` is a shortcut for ``cma.CMADataLogger(name).disp(idx)``.
6153
6154 Arguments
6155 ---------
6156 `name`
6157 name of the logger, filename prefix, `None` evaluates to
6158 the default ``'outcmaes'``
6159 `idx`
6160 indices corresponding to rows in the data file; by
6161 default the first five, then every 100-th, and the last
6162 10 rows. Too large index values are removed.
6163
6164 Examples
6165 --------
6166 ::
6167
6168 import cma, numpy
6169 # assume some data are available from previous runs
6170 cma.disp(None,numpy.r_[0,-1]) # first and last
6171 cma.disp(None,numpy.r_[0:1e9:100,-1]) # every 100-th and last
6172 cma.disp(idx=numpy.r_[0,-10:0]) # first and ten last
6173 cma.disp(idx=numpy.r_[0:1e9:1e3,-10:0])
6174
6175 :See: `CMADataLogger.disp()`
6176
6177 """
return CMADataLogger(name if name else 'outcmaes').disp(idx)
6180
6181 # ____________________________________________________________
6182 -def _fileToMatrix(file_name):
6183 """rudimentary method to read in data from a file"""
# TODO: np.loadtxt() might be an alternative
try:
    lres = []
    with open(file_name, 'r') as f:
        for line in f.readlines():
            if len(line) > 0 and line[0] not in ('%', '#'):
                lres.append(list(map(float, line.split())))
    res = lres
    while res != [] and res[0] == []:  # remove further leading empty lines
        del res[0]
    return res
except (IOError, OSError, ValueError):
    print('could not read file ' + file_name)
6197
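# A hedged alternative sketch using numpy directly; passing a sequence to
# `comments` needs a sufficiently recent numpy, and rows of unequal length
# (as the fit file may contain) are not supported, which is why the
# hand-rolled reader above is kept:
#
#     import numpy as np
#     res = np.loadtxt(file_name, comments=('%', '#')).tolist()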
6198 # ____________________________________________________________
6199 # ____________________________________________________________
6200 -class NoiseHandler(object):
6201 """Noise handling according to [Hansen et al 2009, A Method for
6202 Handling Uncertainty in Evolutionary Optimization...]
6203
The interface of this class is still tentative and subject to change.
6205
6206 The attribute ``evaluations`` serves to control the noise via
6207 number of evaluations, for example in ``fmin`` or with
6208 `ask_and_eval()`. The parameter ``maxevals`` (second parameter)
6209 provides the upper bound, or lower and upper bound, or lower and
6210 upper bound and initial value, all 1 by default, compare also the
6211 second example.
6212
6213 Examples
6214 --------
6215 Minimal example together with `fmin` on a non-noisy function:
6216
6217 >>> import cma
6218 >>> cma.fmin(cma.felli, 7 * [1], 1, noise_handler=cma.NoiseHandler(7)) # dimension 7
6219
6220 More verbose example in the optimization loop with a noisy function
6221 defined in ``func``:
6222
6223 >>> import cma, numpy as np
6224 >>> func = lambda x: cma.fcts.sphere(x) * (1 + 4 * np.random.randn() / len(x)) # cma.Fcts.noisysphere
6225 >>> es = cma.CMAEvolutionStrategy(np.ones(10), 1)
6226 >>> nh = cma.NoiseHandler(es.N, maxevals=[1, 1, 30])
6227 >>> while not es.stop():
6228 ... X, fit_vals = es.ask_and_eval(func, evaluations=nh.evaluations)
6229 ... es.tell(X, fit_vals) # prepare for next iteration
6230 ... es.sigma *= nh(X, fit_vals, func, es.ask) # see method __call__
6231 ... es.countevals += nh.evaluations_just_done # this is a hack, not important though
6232 ... es.logger.add(more_data = [nh.evaluations, nh.noiseS]) # add a data point
6233 ... es.disp()
6234 ... # nh.maxevals = ... it might be useful to start with smaller values and then increase
6235 >>> print(es.stop())
6236 >>> print(es.result()[-2]) # take mean value, the best solution is totally off
6237 >>> assert sum(es.result()[-2]**2) < 1e-9
6238 >>> print(X[np.argmin(fit_vals)]) # not bad, but probably worse than the mean
6239 >>> # es.logger.plot()
6240
6241
6242 The command ``logger.plot()`` will plot the logged data.
6243
6244 The noise options of `fmin()` control a `NoiseHandler` instance similar to this
6245 example. The command ``cma.CMAOptions('noise')`` lists in effect the parameters of
6246 `__init__` apart from ``aggregate``.
6247
6248 Details
6249 -------
6250 The parameters reevals, theta, c_s, and alpha_t are set differently
6251 than in the original publication, see method `__init__()`. For a
6252 very small population size, say popsize <= 5, the measurement
6253 technique based on rank changes is likely to fail.
6254
6255 Missing Features
6256 ----------------
6257 In case no noise is found, ``self.lam_reeval`` should be adaptive
6258 and get at least as low as 1 (however the possible savings from this
6259 are rather limited). Another option might be to decide during the
6260 first call by a quantitative analysis of fitness values whether
6261 ``lam_reeval`` is set to zero. More generally, an automatic noise
6262 mode detection might also set the covariance matrix learning rates
6263 to smaller values.
6264
6265 :See: `fmin()`, `ask_and_eval()`
6266
6267 """
6268 # TODO: for const additive noise a better version might be with alphasigma also used for sigma-increment,
6269 # while all other variance changing sources are removed (because they are intrinsically biased). Then
6270 # using kappa to get convergence (with unit sphere samples): noiseS=0 leads to a certain kappa increasing rate?
6271 - def __init__(self, N, maxevals=[1, 1, 1], aggregate=np.median, reevals=None, epsilon=1e-7, parallel=False):
6272 """parameters are
6273
6274 `N`
6275 dimension, (only) necessary to adjust the internal "alpha"-parameters
6276 `maxevals`
6277 maximal value for ``self.evaluations``, where
6278 ``self.evaluations`` function calls are aggregated for
6279 noise treatment. With ``maxevals == 0`` the noise
6280 handler is (temporarily) "switched off". If `maxevals`
6281 is a list, min value and (for >2 elements) median are
6282 used to define minimal and initial value of
6283 ``self.evaluations``. Choosing ``maxevals > 1`` is only
reasonable if the original ``fit`` values (that
6285 are passed to `__call__`) are computed by aggregation of
6286 ``self.evaluations`` values (otherwise the values are
6287 not comparable), as it is done within `fmin()`.
6288 `aggregate`
6289 function to aggregate single f-values to a 'fitness', e.g.
6290 ``np.median``.
6291 `reevals`
6292 number of solutions to be reevaluated for noise measurement,
6293 can be a float, by default set to ``2 + popsize/20``, where
6294 ``popsize = len(fit)`` in ``__call__``.
6295 zero switches noise handling off.
6296 `epsilon`
6297 multiplier for perturbation of the reevaluated solutions
6298 `parallel`
6299 a single f-call with all resampled solutions
6300
6301 :See: `fmin()`, `CMAOptions`, `CMAEvolutionStrategy.ask_and_eval()`
6302
6303 """
6304 self.lam_reeval = reevals # 2 + popsize/20, see method indices(), originally 2 + popsize/10
6305 self.epsilon = epsilon
6306 self.parallel = parallel
6307 self.theta = 0.5 # originally 0.2
6308 self.cum = 0.3 # originally 1, 0.3 allows one disagreement of current point with resulting noiseS
6309 self.alphasigma = 1 + 2 / (N + 10) # unit sphere sampling: 1 + 1 / (N + 10)
6310 self.alphaevals = 1 + 2 / (N + 10) # originally 1.5
6311 self.alphaevalsdown = self.alphaevals**-0.25 # originally 1/1.5
6313 self.evaluations = 1 # to aggregate for a single f-evaluation
6314 self.minevals = 1
6315 self.maxevals = int(np.max(maxevals))
6316 if hasattr(maxevals, '__contains__'): # i.e. can deal with ``in``
6317 if len(maxevals) > 1:
6318 self.minevals = min(maxevals)
6319 self.evaluations = self.minevals
6320 if len(maxevals) > 2:
6321 self.evaluations = np.median(maxevals)
6322 self.f_aggregate = aggregate
6323 self.evaluations_just_done = 0 # actually conducted evals, only for documentation
6324 self.noiseS = 0
6325
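# Worked example of the `maxevals` semantics (hedged, follows the
# docstring and the code above):
#
#     nh = NoiseHandler(10, maxevals=[1, 2, 30])
#     assert (nh.minevals, nh.evaluations, nh.maxevals) == (1, 2, 30)
#
# A scalar, e.g. ``maxevals=5``, gives ``minevals == evaluations == 1``.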
6326 - def __call__(self, X, fit, func, ask=None, args=()):
"""proceed with noise measurement, set the attributes ``evaluations``
(proposed number of evaluations to "treat" noise) and ``evaluations_just_done``
anew, and return a factor for increasing sigma.
6330
6331 Parameters
6332 ----------
6333 `X`
6334 a list/sequence/vector of solutions
6335 `fit`
6336 the respective list of function values
6337 `func`
6338 the objective function, ``fit[i]`` corresponds to ``func(X[i], *args)``
6339 `ask`
6340 a method to generate a new, slightly disturbed solution. The argument
6341 is (only) mandatory if ``epsilon`` is not zero, see `__init__()`.
6342 `args`
6343 optional additional arguments to `func`
6344
6345 Details
6346 -------
6347 Calls the methods ``reeval()``, ``update_measure()`` and ``treat()`` in this order.
6348 ``self.evaluations`` is adapted within the method `treat()`.
6349
6350 """
6351 self.evaluations_just_done = 0
6352 if not self.maxevals or self.lam_reeval == 0:
6353 return 1.0
6354 res = self.reeval(X, fit, func, ask, args)
6355 if not len(res):
6356 return 1.0
6357 self.update_measure()
6358 return self.treat()
6359
6360 - def get_evaluations(self):
"""return ``self.evaluations``, the number of evaluations to get a single fitness measurement"""
6362 return self.evaluations
6363
6364 - def treat(self):
6365 """adapt self.evaluations depending on the current measurement value
6366 and return ``sigma_fac in (1.0, self.alphasigma)``
6367
6368 """
6369 if self.noiseS > 0:
6370 self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))
6371 return self.alphasigma
6372 else:
6373 self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))
6374 return 1.0 # / self.alphasigma
6375
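# Illustration (hedged): with N == 10, alphaevals == 1 + 2 / 20 == 1.1, so
# under measured noise (noiseS > 0) `evaluations` grows by 10% per call up
# to `maxevals`, and otherwise shrinks by the factor 1.1**-0.25, roughly
# 2% per call, down to `minevals`.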
6376 - def reeval(self, X, fit, func, ask, args=()):
6377 """store two fitness lists, `fit` and ``fitre`` reevaluating some
6378 solutions in `X`.
6379 ``self.evaluations`` evaluations are done for each reevaluated
6380 fitness value.
6381 See `__call__()`, where `reeval()` is called.
6382
6383 """
6384 self.fit = list(fit)
6385 self.fitre = list(fit)
6386 self.idx = self.indices(fit)
6387 if not len(self.idx):
6388 return self.idx
6389 evals = int(self.evaluations) if self.f_aggregate else 1
6390 fagg = np.median if self.f_aggregate is None else self.f_aggregate
6391 for i in self.idx:
6392 X_i = X[i]
6393 if self.epsilon:
6394 if self.parallel:
6395 self.fitre[i] = fagg(func(ask(evals, X_i, self.epsilon), *args))
6396 else:
6397 self.fitre[i] = fagg([func(ask(1, X_i, self.epsilon)[0], *args)
6398 for _k in xrange(evals)])
6399 else:
6400 self.fitre[i] = fagg([func(X_i, *args) for _k in xrange(evals)])
6401 self.evaluations_just_done = evals * len(self.idx)
6402 return self.fit, self.fitre, self.idx
6403
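# Note on the two branches above (hedged reading of the code): with
# ``parallel=True``, `ask` must return `evals` perturbed copies of X[i]
# in a single call and `func` must accept the whole list; otherwise
# `func` is called once per resampled solution and the `evals` results
# are aggregated with `fagg`.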
6404 - def update_measure(self):
6405 """updated noise level measure using two fitness lists ``self.fit`` and
6406 ``self.fitre``, return ``self.noiseS, all_individual_measures``.
6407
6408 Assumes that `self.idx` contains the indices where the fitness
6409 lists differ
6410
6411 """
6412 lam = len(self.fit)
6413 idx = np.argsort(self.fit + self.fitre)
6414 ranks = np.argsort(idx).reshape((2, lam))
6415 rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
6416
6417 # compute rank change limits using both ranks[0] and ranks[1]
6418 r = np.arange(1, 2 * lam) # 2 * lam - 2 elements
6419 limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
6420 self.theta * 50) +
6421 Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
6422 self.theta * 50))
6423 for i in self.idx]
6424 # compute measurement
6425 # max: 1 rankchange in 2*lambda is always fine
6426 s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1) # lives roughly in 0..2*lambda
6427 self.noiseS += self.cum * (np.mean(s) - self.noiseS)
6428 return self.noiseS, s
6429
6430 - def indices(self, fit):
6431 """return the set of indices to be reevaluated for noise measurement,
6432 taking the ``lam_reeval`` best from the first ``2 * lam_reeval + 2``
6433 values.
6434
6435 Given the first values are the earliest, this is a useful policy also
6436 with a time changing objective.
6437
6438 """
6439 lam = self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20
6440 reev = int(lam) + ((lam % 1) > np.random.rand())
6441 return np.argsort(array(fit, copy=False)[:2 * (reev + 1)])[:reev]
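    # A minimal usage sketch (kept fully commented so importing this module
    # stays side-effect free). It follows the ask-and-tell noise handling
    # used by `fmin`; `es`, `func` and the `evaluations` keyword of
    # `ask_and_eval` are assumptions of this illustration, not a fixed API:
    #
    #   es = CMAEvolutionStrategy(10 * [1], 1)
    #   nh = NoiseHandler(es.N, maxevals=[1, 1, 30])
    #   while not es.stop():
    #       X, fit = es.ask_and_eval(func, evaluations=nh.evaluations)
    #       es.tell(X, fit)
    #       es.sigma *= nh(X, fit, func, es.ask)  # noise treatment, see __call__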

# ____________________________________________________________
# ____________________________________________________________
class Sections(object):
    """plot sections through an objective function.

    A first rational thing to do when facing an (expensive)
    application. By default, 6 points in each coordinate are evaluated.
    This class is still experimental.

    Examples
    --------

    >>> import cma, numpy as np
    >>> s = cma.Sections(cma.Fcts.rosen, np.zeros(3)).do(plot=False)
    >>> s.do(plot=False)  # evaluate the same points again, i.e. check for noise
    >> try:
    ...     s.plot()
    ... except:
    ...     print('plotting failed: matplotlib.pyplot package is missing?')

    Details
    -------
    Data are saved after each function call during `do()`. The filename
    is attribute ``name`` and by default ``str(func)``, see `__init__()`.

    A random (orthogonal) basis can be generated with
    ``cma.Rotation()(np.eye(3))``.

    CAVEAT: The default name is unique in the function name, but it
    should be unique in all parameters of `__init__()` but `plot_cmd`
    and `load`. If, for example, a different basis is chosen, either
    the name must be changed or the ``.pkl`` file containing the
    previous data must first be renamed or deleted.

    ``s.res`` is a dictionary with an entry for each "coordinate" ``i``
    and with an entry ``'x'``, the middle point. Each entry ``i`` is
    again a dictionary with keys being different dx values and the
    value being a sequence of f-values. For example ``s.res[2][0.1] ==
    [0.01, 0.01]``, which is generated using the difference vector
    ``s.basis[2]`` like

    ``s.res[2][dx] += func(s.res['x'] + dx * s.basis[2])``.

    :See: `__init__()`

    """
    def __init__(self, func, x, args=(), basis=None, name=None,
                 plot_cmd=pyplot.plot if pyplot else None, load=True):
        """
        Parameters
        ----------
            `func`
                objective function
            `x`
                point in search space, middle point of the sections
            `args`
                arguments passed to `func`
            `basis`
                evaluated points are ``func(x + locations[j] * basis[i]) for i in len(basis) for j in len(locations)``,
                see `do()`
            `name`
                filename where to save the result
            `plot_cmd`
                command used to plot the data, typically matplotlib pyplot's `plot` or `semilogy`
            `load`
                load previous data from file ``str(func) + '.pkl'``

        """
        self.func = func
        self.args = args
        self.x = x
        self.name = name if name else str(func).replace(' ', '_').replace('>', '').replace('<', '')
        self.plot_cmd = plot_cmd  # or semilogy
        self.basis = np.eye(len(x)) if basis is None else basis

        try:
            self.load()
            if any(self.res['x'] != x):
                self.res = {}
                self.res['x'] = x  # TODO: res['x'] does not look perfect
            else:
                print(self.name + ' loaded')
        except:
            self.res = {}
            self.res['x'] = x
    def do(self, repetitions=1, locations=np.arange(-0.5, 0.6, 0.2), plot=True):
        """generates, plots and saves function values ``func(y)``,
        where ``y`` is 'close' to `x` (see `__init__()`). The data are stored in
        the ``res`` attribute and the class instance is saved in a file
        with (the weird) name ``str(func)``.

        Parameters
        ----------
            `repetitions`
                for each point; only for noisy functions is a value > 1 useful. For
                ``repetitions==0`` only already generated data are plotted.
            `locations`
                coordinate-wise deviations from the middle point given in `__init__`

        """
        if not repetitions:
            self.plot()
            return

        res = self.res
        for i in range(len(self.basis)):  # i-th coordinate
            if i not in res:
                res[i] = {}
            # xx = np.array(self.x)
            # TODO: store res[i]['dx'] = self.basis[i] here?
            for dx in locations:
                xx = self.x + dx * self.basis[i]
                xkey = dx  # xx[i] if (self.basis == np.eye(len(self.basis))).all() else dx
                if xkey not in res[i]:
                    res[i][xkey] = []
                n = repetitions
                while n > 0:
                    n -= 1
                    res[i][xkey].append(self.func(xx, *self.args))
                    if plot:
                        self.plot()
                    self.save()
        return self

    def plot(self, plot_cmd=None, tf=lambda y: y):
        """plot the data we have, return ``self``"""
        if not plot_cmd:
            plot_cmd = self.plot_cmd
        colors = 'bgrcmyk'
        pyplot.hold(False)
        res = self.res

        flatx, flatf = self.flattened()
        minf = np.inf
        for i in flatf:
            minf = min((minf, min(flatf[i])))
        addf = 1e-9 - minf if minf <= 1e-9 else 0
        for i in sorted(res.keys()):  # we do not plot all values here
            if isinstance(i, int):
                color = colors[i % len(colors)]
                arx = sorted(res[i].keys())
                plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
                pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
                pyplot.hold(True)
                plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
        pyplot.ylabel('f + ' + str(addf))
        pyplot.draw()
        show()
        # raw_input('press return')
        return self

    def flattened(self):
        """return flattened data ``(x, f)`` such that for the sweep through
        coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])``

        """
        flatx = {}
        flatf = {}
        for i in self.res:
            if isinstance(i, int):
                flatx[i] = []
                flatf[i] = []
                for x in sorted(self.res[i]):
                    for d in sorted(self.res[i][x]):
                        flatx[i].append(x)
                        flatf[i].append(d)
        return flatx, flatf

    def save(self, name=None):
        """save to file"""
        import pickle
        name = name if name else self.name
        fun = self.func
        del self.func  # pickling an instance method produces an error
        pickle.dump(self, open(name + '.pkl', "wb"))
        self.func = fun
        return self

    def load(self, name=None):
        """load from file"""
        import pickle
        name = name if name else self.name
        s = pickle.load(open(name + '.pkl', 'rb'))
        self.res = s.res  # disregard the class
        return self

#____________________________________________________________
#____________________________________________________________
class _Error(Exception):
    """generic exception of cma module"""
    pass

# ____________________________________________________________
# ____________________________________________________________
#
class ElapsedTime(object):
    """using ``time.clock`` with overflow handling to measure CPU time.

    Example:

    >>> clock = ElapsedTime()  # clock starts here
    >>> t1 = clock()  # get elapsed CPU time

    Details: a 32-bit C counter of microseconds overflows after
    int(2**32/1e6) == 4294 [s], that is, after about 72 min.

    """
    def __init__(self):
        self.tic0 = time.clock()
        self.tic = self.tic0
        self.lasttoc = time.clock()
        self.lastdiff = time.clock() - self.lasttoc
        self.time_to_add = 0
        self.messages = 0
    reset = __init__

    def __call__(self):
        toc = time.clock()
        if toc - self.tic >= self.lasttoc - self.tic:
            self.lastdiff = toc - self.lasttoc
            self.lasttoc = toc
        else:  # overflow, reset self.tic
            if self.messages < 3:
                self.messages += 1
                print('  in cma.ElapsedTime: time measure overflow, last difference estimated from',
                      self.tic0, self.tic, self.lasttoc, toc, toc - self.lasttoc, self.lastdiff)

            self.time_to_add += self.lastdiff + self.lasttoc - self.tic
            self.tic = toc  # reset
            self.lasttoc = toc
        self.elapsedtime = toc - self.tic + self.time_to_add
        return self.elapsedtime

class Misc(object):
    # ____________________________________________________________
    # ____________________________________________________________
    #
    class MathHelperFunctions(object):
        """static convenience math helper functions; if the function name
        is preceded with an "a", a numpy array is returned

        """
        @staticmethod
        def aclamp(x, upper):
            return -Misc.MathHelperFunctions.apos(-x, -upper)

        @staticmethod
        def equals_approximately(a, b, eps=1e-12):
            if a < 0:
                a, b = -1 * a, -1 * b
            return (a - eps < b < a + eps) or ((1 - eps) * a < b < (1 + eps) * a)

        @staticmethod
        def vequals_approximately(a, b, eps=1e-12):
            a, b = array(a), array(b)
            idx = np.where(a < 0)[0]
            if len(idx):
                a[idx], b[idx] = -1 * a[idx], -1 * b[idx]
            return (np.all(a - eps < b) and np.all(b < a + eps)
                    ) or (np.all((1 - eps) * a < b) and np.all(b < (1 + eps) * a))

        @staticmethod
        def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            # first: symmetrize A
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T)
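            # Quick sanity check (commented out): for the zero matrix the
            # matrix exponential is the identity, e.g.
            #   Misc.MathHelperFunctions.expms(np.zeros((2, 2)))
            #   # -> array([[ 1.,  0.],
            #   #           [ 0.,  1.]])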
        @staticmethod
        def amax(vec, vec_or_scalar):
            return array(Misc.MathHelperFunctions.max(vec, vec_or_scalar))

        @staticmethod
        def max(vec, vec_or_scalar):
            b = vec_or_scalar
            if np.isscalar(b):
                m = [max(x, b) for x in vec]
            else:
                m = [max(vec[i], b[i]) for i in rglen(vec)]
            return m

        @staticmethod
        def minmax(val, min_val, max_val):
            assert min_val <= max_val
            return min((max_val, max((val, min_val))))

        @staticmethod
        def amin(vec_or_scalar, vec_or_scalar2):
            return array(Misc.MathHelperFunctions.min(vec_or_scalar, vec_or_scalar2))

        @staticmethod
        def min(a, b):
            iss = np.isscalar
            if iss(a) and iss(b):
                return min(a, b)
            if iss(a):
                a, b = b, a
            # now only b can still be a scalar
            if iss(b):
                return [min(x, b) for x in a]
            else:  # two non-scalars must have the same length
                return [min(a[i], b[i]) for i in rglen(a)]

        @staticmethod
        def norm(vec, expo=2):
            return sum(vec**expo)**(1 / expo)

        @staticmethod
        def apos(x, lower=0):
            """clips argument (scalar or array) from below at lower"""
            if lower == 0:
                return (x > 0) * x
            else:
                return lower + (x > lower) * (x - lower)

        @staticmethod
        def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
            """``prctile(data, 50)`` returns the median, but p_vals can
            also be a sequence.

            Provides better values for small samples than
            matplotlib.mlab.prctile, but is also slower.

            """
            ps = [p_vals] if np.isscalar(p_vals) else p_vals

            if not sorted_:
                data = sorted(data)
            n = len(data)
            d = []
            for p in ps:
                fi = p * n / 100 - 0.5
                if fi <= 0:  # maybe extrapolate?
                    d.append(data[0])
                elif fi >= n - 1:
                    d.append(data[-1])
                else:
                    i = int(fi)
                    d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
            return d[0] if np.isscalar(p_vals) else d
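            # Worked examples (commented out):
            #   Mh.prctile([1, 2, 3, 4], 50)            # -> 2.5, the median
            #   Mh.prctile([1, 2, 3, 4], [0, 50, 100])  # -> [1, 2.5, 4]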
        @staticmethod
        def sround(nb):  # TODO: to be vectorized
            """return stochastic round: floor(nb) + (rand()<remainder(nb))"""
            return nb // 1 + (np.random.rand(1)[0] < (nb % 1))
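            # For example, sround(2.25) returns 2.0 with probability 0.75 and
            # 3.0 with probability 0.25, hence 2.25 in expectation.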

        @staticmethod
        def cauchy_with_variance_one():
            n = np.random.randn() / np.random.randn()
            while abs(n) > 1000:
                n = np.random.randn() / np.random.randn()
            return n / 25

        @staticmethod
        def standard_finite_cauchy(size=1):
            try:
                l = len(size)
            except TypeError:
                l = 0

            if l == 0:
                return array([Mh.cauchy_with_variance_one() for _i in xrange(size)])
            elif l == 1:
                return array([Mh.cauchy_with_variance_one() for _i in xrange(size[0])])
            elif l == 2:
                return array([[Mh.cauchy_with_variance_one() for _i in xrange(size[1])]
                              for _j in xrange(size[0])])
            else:
                raise _Error('len(size) cannot be larger than two')


        @staticmethod
        def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
            """return the likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
            # testing: MC integral must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
            # for i in range(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
            if m is None:
                dx = x
            else:
                dx = x - m  # array(x) - array(m)
            n = len(x)
            s2pi = (2 * np.pi)**(n / 2.)
            if Cinv is None:
                return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
            if detC is None:
                detC = 1. / np.linalg.linalg.det(Cinv)
            return exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n

        @staticmethod
        def loglikelihood(self, x, previous=False):
            """return log-likelihood of `x` regarding the current sample distribution;
            `self` is expected to be a `CMAEvolutionStrategy` instance, passed explicitly"""
            # testing of original fct: MC integral must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
            # for i in range(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
            # TODO: test this!!
            # c=cma.fmin...
            # c[3]['cma'].loglikelihood(...)

            if previous and hasattr(self, 'lastiter'):
                sigma = self.lastiter.sigma
                Crootinv = self.lastiter._Crootinv
                xmean = self.lastiter.mean
                D = self.lastiter.D
            elif previous and self.countiter > 1:
                raise _Error('no previous distribution parameters stored, check options importance_mixing')
            else:
                sigma = self.sigma
                Crootinv = self._Crootinv
                xmean = self.mean
                D = self.D

            dx = array(x) - xmean  # array(x) - array(m)
            n = self.N
            logs2pi = n * log(2 * np.pi) / 2.
            logdetC = 2 * sum(log(D))
            dx = np.dot(Crootinv, dx)
            res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * log(sigma)
            if 1 < 3:  # testing
                s2pi = (2 * np.pi)**(n / 2.)
                detC = np.prod(D)**2
                res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n)
                assert res - 1e-8 < res2 < res + 1e-8  # both computations must agree
            return res

        # ____________________________________________________________
        # ____________________________________________________________
        #
        # C and B are arrays rather than matrices, because they are
        # addressed via B[i][j], matrices can only be addressed via B[i,j]

        # tred2(N, B, diagD, offdiag);
        # tql2(N, diagD, offdiag, B);


        # Symmetric Householder reduction to tridiagonal form, translated from the JAMA package.
        @staticmethod
        def eig(C):
            """eigendecomposition of a symmetric matrix, much slower than
            `numpy.linalg.eigh`, return ``(EVals, Basis)``, the eigenvalues
            and an orthonormal basis of the corresponding eigenvectors, where

                ``Basis[i]``
                    the i-th row of ``Basis``
                columns of ``Basis``, ``[Basis[j][i] for j in range(len(Basis))]``
                    the i-th eigenvector with eigenvalue ``EVals[i]``

            """

            # class eig(object):
            #     def __call__(self, C):

            # Householder transformation of a symmetric matrix V into tridiagonal form.
            #   -> n          : dimension
            #   -> V          : symmetric nxn-matrix
            #   <- V          : orthogonal transformation matrix:
            #                   tridiag matrix == V * V_in * V^t
            #   <- d          : diagonal
            #   <- e[0..n-1]  : off diagonal (elements 1..n-1)

            # Symmetric tridiagonal QL algorithm, iterative
            # Computes the eigensystem from a tridiagonal matrix in roughly 3N^3 operations
            #   -> n          : Dimension.
            #   -> d          : Diagonal of the tridiagonal matrix.
            #   -> e[1..n-1]  : off-diagonal, output from Householder
            #   -> V          : matrix output from Householder
            #   <- d          : eigenvalues
            #   <- e          : garbage?
            #   <- V          : basis of eigenvectors, according to d


            # tred2(N, B, diagD, offdiag); B=C on input
            # tql2(N, diagD, offdiag, B);

            # private void tred2 (int n, double V[][], double d[], double e[]) {
            def tred2(n, V, d, e):
                # This is derived from the Algol procedures tred2 by
                # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
                # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
                # Fortran subroutine in EISPACK.

                num_opt = False  # factor 1.5 in 30-D

                for j in range(n):
                    d[j] = V[n - 1][j]  # d is output argument

                # Householder reduction to tridiagonal form.

                for i in range(n - 1, 0, -1):
                    # Scale to avoid under/overflow.
                    h = 0.0
                    if not num_opt:
                        scale = 0.0
                        for k in range(i):
                            scale = scale + abs(d[k])
                    else:
                        scale = sum(abs(d[0:i]))

                    if scale == 0.0:
                        e[i] = d[i - 1]
                        for j in range(i):
                            d[j] = V[i - 1][j]
                            V[i][j] = 0.0
                            V[j][i] = 0.0
                    else:

                        # Generate Householder vector.
                        if not num_opt:
                            for k in range(i):
                                d[k] /= scale
                                h += d[k] * d[k]
                        else:
                            d[:i] /= scale
                            h = np.dot(d[:i], d[:i])

                        f = d[i - 1]
                        g = h**0.5

                        if f > 0:
                            g = -g

                        e[i] = scale * g
                        h = h - f * g
                        d[i - 1] = f - g
                        if not num_opt:
                            for j in range(i):
                                e[j] = 0.0
                        else:
                            e[:i] = 0.0

                        # Apply similarity transformation to remaining columns.

                        for j in range(i):
                            f = d[j]
                            V[j][i] = f
                            g = e[j] + V[j][j] * f
                            if not num_opt:
                                for k in range(j + 1, i):
                                    g += V[k][j] * d[k]
                                    e[k] += V[k][j] * f
                                e[j] = g
                            else:
                                e[j + 1:i] += V.T[j][j + 1:i] * f
                                e[j] = g + np.dot(V.T[j][j + 1:i], d[j + 1:i])

                        f = 0.0
                        if not num_opt:
                            for j in range(i):
                                e[j] /= h
                                f += e[j] * d[j]
                        else:
                            e[:i] /= h
                            f += np.dot(e[:i], d[:i])

                        hh = f / (h + h)
                        if not num_opt:
                            for j in range(i):
                                e[j] -= hh * d[j]
                        else:
                            e[:i] -= hh * d[:i]

                        for j in range(i):
                            f = d[j]
                            g = e[j]
                            if not num_opt:
                                for k in range(j, i):
                                    V[k][j] -= (f * e[k] + g * d[k])
                            else:
                                V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])

                            d[j] = V[i - 1][j]
                            V[i][j] = 0.0

                    d[i] = h
                # end for i--

                # Accumulate transformations.

                for i in range(n - 1):
                    V[n - 1][i] = V[i][i]
                    V[i][i] = 1.0
                    h = d[i + 1]
                    if h != 0.0:
                        if not num_opt:
                            for k in range(i + 1):
                                d[k] = V[k][i + 1] / h
                        else:
                            d[:i + 1] = V.T[i + 1][:i + 1] / h

                        for j in range(i + 1):
                            if not num_opt:
                                g = 0.0
                                for k in range(i + 1):
                                    g += V[k][i + 1] * V[k][j]
                                for k in range(i + 1):
                                    V[k][j] -= g * d[k]
                            else:
                                g = np.dot(V.T[i + 1][0:i + 1], V.T[j][0:i + 1])
                                V.T[j][:i + 1] -= g * d[:i + 1]

                    if not num_opt:
                        for k in range(i + 1):
                            V[k][i + 1] = 0.0
                    else:
                        V.T[i + 1][:i + 1] = 0.0


                if not num_opt:
                    for j in range(n):
                        d[j] = V[n - 1][j]
                        V[n - 1][j] = 0.0
                else:
                    d[:n] = V[n - 1][:n]
                    V[n - 1][:n] = 0.0

                V[n - 1][n - 1] = 1.0
                e[0] = 0.0


            # Symmetric tridiagonal QL algorithm, taken from the JAMA package.
            # private void tql2 (int n, double d[], double e[], double V[][]) {
            # needs roughly 3N^3 operations
            def tql2(n, d, e, V):

                # This is derived from the Algol procedures tql2, by
                # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
                # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
                # Fortran subroutine in EISPACK.

                num_opt = False  # using vectors from numpy makes it faster

                if not num_opt:
                    for i in range(1, n):  # (int i = 1; i < n; i++):
                        e[i - 1] = e[i]
                else:
                    e[0:n - 1] = e[1:n]
                e[n - 1] = 0.0

                f = 0.0
                tst1 = 0.0
                eps = 2.0**-52.0
                for l in range(n):  # (int l = 0; l < n; l++) {

                    # Find small subdiagonal element

                    tst1 = max(tst1, abs(d[l]) + abs(e[l]))
                    m = l
                    while m < n:
                        if abs(e[m]) <= eps * tst1:
                            break
                        m += 1

                    # If m == l, d[l] is an eigenvalue,
                    # otherwise, iterate.

                    if m > l:
                        iiter = 0
                        while 1:  # do {
                            iiter += 1  # (Could check iteration count here.)

                            # Compute implicit shift

                            g = d[l]
                            p = (d[l + 1] - g) / (2.0 * e[l])
                            r = (p**2 + 1)**0.5  # hypot(p, 1.0)
                            if p < 0:
                                r = -r

                            d[l] = e[l] / (p + r)
                            d[l + 1] = e[l] * (p + r)
                            dl1 = d[l + 1]
                            h = g - d[l]
                            if not num_opt:
                                for i in range(l + 2, n):
                                    d[i] -= h
                            else:
                                d[l + 2:n] -= h

                            f = f + h

                            # Implicit QL transformation.

                            p = d[m]
                            c = 1.0
                            c2 = c
                            c3 = c
                            el1 = e[l + 1]
                            s = 0.0
                            s2 = 0.0

                            # hh = V.T[0].copy()  # only with num_opt
                            for i in range(m - 1, l - 1, -1):  # (int i = m-1; i >= l; i--) {
                                c3 = c2
                                c2 = c
                                s2 = s
                                g = c * e[i]
                                h = c * p
                                r = (p**2 + e[i]**2)**0.5  # hypot(p, e[i])
                                e[i + 1] = s * r
                                s = e[i] / r
                                c = p / r
                                p = c * d[i] - s * g
                                d[i + 1] = h + s * (c * g + s * d[i])

                                # Accumulate transformation.

                                if not num_opt:  # overall factor 3 in 30-D
                                    for k in range(n):  # (int k = 0; k < n; k++) {
                                        h = V[k][i + 1]
                                        V[k][i + 1] = s * V[k][i] + c * h
                                        V[k][i] = c * V[k][i] - s * h
                                else:  # about 20% faster in 10-D
                                    hh = V.T[i + 1].copy()
                                    # hh[:] = V.T[i+1][:]
                                    V.T[i + 1] = s * V.T[i] + c * hh
                                    V.T[i] = c * V.T[i] - s * hh
                                    # V.T[i] *= c
                                    # V.T[i] -= s * hh

                            p = -s * s2 * c3 * el1 * e[l] / dl1
                            e[l] = s * p
                            d[l] = c * p

                            # Check for convergence.
                            if abs(e[l]) <= eps * tst1:
                                break
                        # } while (Math.abs(e[l]) > eps*tst1);

                    d[l] = d[l] + f
                    e[l] = 0.0


                # Sort eigenvalues and corresponding vectors.
            # tql2

            N = len(C[0])
            if 1 < 3:
                V = [[x[i] for i in xrange(N)] for x in C]  # copy each "row"
                d = N * [0.]
                e = N * [0.]

            tred2(N, V, d, e)
            tql2(N, d, e, V)
            return (array(d), array(V))
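        # Illustrative consistency check for `eig` (commented out), comparing
        # against numpy's eigh up to ordering; `A` here is an arbitrary
        # symmetric positive semi-definite test matrix made up for this sketch:
        #   A = np.random.randn(4, 4); C = np.dot(A, A.T)
        #   d_, V_ = Misc.MathHelperFunctions.eig(C)
        #   assert Mh.vequals_approximately(sorted(d_), sorted(np.linalg.eigh(C)[0]), 1e-9)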
Mh = Misc.MathHelperFunctions

# from new_stuff import *

def pprint(to_be_printed):
    """nicely formatted print"""
    try:
        import pprint as pp
        # generate an instance PrettyPrinter
        # pp.PrettyPrinter().pprint(to_be_printed)
        pp.pprint(to_be_printed)
    except ImportError:
        if isinstance(to_be_printed, dict):
            print('{')
            for k, v in to_be_printed.items():
                print("'" + k + "'" if isinstance(k, basestring) else k,
                      ': ',
                      "'" + v + "'" if isinstance(v, basestring) else v,
                      sep="")
            print('}')
        else:
            print('could not import pprint module, will apply regular print')
            print(to_be_printed)

pp = pprint

class Rotation(object):
    """Rotation class that implements an orthogonal linear transformation,
    one for each dimension. Used to implement non-separable test functions.

    Example:

    >>> import cma, numpy as np
    >>> R = cma.Rotation()
    >>> R2 = cma.Rotation()  # another rotation
    >>> x = np.array((1,2,3))
    >>> print(R(R(x), inverse=1))
    [ 1.  2.  3.]

    """
    dicMatrices = {}  # store matrix if necessary, for each dimension
    def __init__(self):
        self.dicMatrices = {}  # otherwise there might be shared bases, which is probably not what we want
    def __call__(self, x, inverse=False):  # function when calling an object
        """Rotates the input array `x` with a fixed rotation matrix
        (``self.dicMatrices['str(len(x))']``)
        """
        N = x.shape[0]  # can be an array or matrix; TODO: accept also a list of arrays?
        if str(N) not in self.dicMatrices:  # create a new N-basis once and for all
            B = np.random.randn(N, N)
            for i in xrange(N):
                for j in xrange(0, i):
                    B[i] -= np.dot(B[i], B[j]) * B[j]
                B[i] /= sum(B[i]**2)**0.5
            self.dicMatrices[str(N)] = B
        if inverse:
            return np.dot(self.dicMatrices[str(N)].T, x)  # compute rotation
        else:
            return np.dot(self.dicMatrices[str(N)], x)  # compute rotation
# Use rotate(x) to rotate x
rotate = Rotation()
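# As an orthogonal transformation, `rotate` preserves Euclidean lengths;
# a commented-out spot check (x is an arbitrary point for illustration):
#   x = np.random.randn(5)
#   assert Mh.equals_approximately(sum(rotate(x)**2), sum(x**2), 1e-9)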

# ____________________________________________________________
# ____________________________________________________________
#
class FitnessFunctions(object):
    """versatile container for test objective functions"""

    def __init__(self):
        self.counter = 0  # number of calls or any other practical use
    def rot(self, x, fun, rot=1, args=()):
        """returns ``fun(rotation(x), *args)``, i.e. `fun` applied to a rotated argument"""
        if len(np.shape(array(x))) > 1:  # parallelized
            res = []
            for x in x:
                res.append(self.rot(x, fun, rot, args))
            return res

        if rot:
            return fun(rotate(x, *args))
        else:
            return fun(x)
    def somenan(self, x, fun, p=0.1):
        """returns sometimes np.NaN, otherwise fun(x)"""
        if np.random.rand(1) < p:
            return np.NaN
        else:
            return fun(x)
    def rand(self, x):
        """Random test objective function"""
        return np.random.random(1)[0]
    def linear(self, x):
        return -x[0]
    def lineard(self, x):
        if 1 < 3 and any(array(x) < 0):
            return np.nan
        if 1 < 3 and sum([(10 + i) * x[i] for i in rglen(x)]) > 50e3:
            return np.nan
        return -sum(x)
    def sphere(self, x):
        """Sphere (squared norm) test objective function"""
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        return sum((x + 0)**2)
    def sphere_pos(self, x):
        """Sphere (squared norm) test objective function"""
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        c = 0.0
        if x[0] < c:
            return np.nan
        return -c**2 + sum((x + 0)**2)
    def spherewithoneconstraint(self, x):
        return sum((x + 0)**2) if x[0] > 1 else np.nan
    def elliwithoneconstraint(self, x, idx=[-1]):
        return self.ellirot(x) if all(array(x)[idx] > 1) else np.nan

    def spherewithnconstraints(self, x):
        return sum((x + 0)**2) if all(array(x) > 1) else np.nan
    # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
    def noisysphere(self, x, noise=2.10e-9, cond=1.0, noise_offset=0.10):
        """noise=10 does not work with default popsize, noise handling does not help"""
        return self.elli(x, cond=cond) * (1 + noise * np.random.randn() / len(x)) + noise_offset * np.random.rand()
    def spherew(self, x):
        """Sphere (squared norm) with sum x_i = 1 test objective function"""
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        # s = sum(abs(x))
        # return sum((x/s+0)**2) - 1/len(x)
        # return sum((x/s)**2) - 1/len(x)
        return -0.01 * x[0] + abs(x[0])**-2 * sum(x[1:]**2)
    def partsphere(self, x):
        """Sphere (squared norm) test objective function"""
        self.counter += 1
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        dim = len(x)
        x = array([x[i % dim] for i in range(2 * dim)])
        N = 8
        i = self.counter % dim
        # f = sum(x[i:i + N]**2)
        f = sum(x[np.random.randint(dim, size=N)]**2)
        return f
    def sectorsphere(self, x):
        """asymmetric Sphere (squared norm) test objective function"""
        return sum(x**2) + (1e6 - 1) * sum(x[x < 0]**2)
    def cornersphere(self, x):
        """Sphere (squared norm) test objective function constrained to the corner"""
        nconstr = len(x) - 0
        if any(x[:nconstr] < 1):
            return np.NaN
        return sum(x**2) - nconstr
    def cornerelli(self, x):
        """ """
        if any(x < 1):
            return np.NaN
        return self.elli(x) - self.elli(np.ones(len(x)))
    def cornerellirot(self, x):
        """ """
        if any(x < 1):
            return np.NaN
        return self.ellirot(x)
    def normalSkew(self, f):
        N = np.random.randn(1)[0]**2
        if N < 1:
            N = f * N  # diminish blow up lower part
        return N
    def noiseC(self, x, func=sphere, fac=10, expon=0.8):
        f = func(self, x)
        N = np.random.randn(1)[0] / np.random.randn(1)[0]
        return max(1e-19, f + (float(fac) / len(x)) * f**expon * N)
    def noise(self, x, func=sphere, fac=10, expon=1):
        f = func(self, x)
        # R = np.random.randn(1)[0]
        R = np.log10(f) + expon * abs(10 - np.log10(f)) * np.random.rand(1)[0]
        # sig = float(fac)/float(len(x))
        # R = log(f) + 0.5*log(f) * random.randn(1)[0]
        # return max(1e-19, f + sig * (f**np.log10(f)) * np.exp(R))
        # return max(1e-19, f * np.exp(sig * N / f**expon))
        # return max(1e-19, f * normalSkew(f**expon)**sig)
        return f + 10**R  # == f + f**(1+0.5*RN)
    def cigar(self, x, rot=0, cond=1e6, noise=0):
        """Cigar test objective function"""
        if rot:
            x = rotate(x)
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [(x[0]**2 + cond * sum(x[1:]**2)) * np.exp(noise * np.random.randn(1)[0] / len(x)) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element list into scalar
    def tablet(self, x, rot=0):
        """Tablet test objective function"""
        if rot:
            x = rotate(x)
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [1e6 * x[0]**2 + sum(x[1:]**2) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element list into scalar
    def cigtab(self, y):
        """Cigtab test objective function"""
        X = [y] if np.isscalar(y[0]) else y
        f = [1e-4 * x[0]**2 + 1e4 * x[1]**2 + sum(x[2:]**2) for x in X]
        return f if len(f) > 1 else f[0]
    def twoaxes(self, y):
        """Twoaxes test objective function"""
        X = [y] if np.isscalar(y[0]) else y
        N2 = len(X[0]) // 2
        f = [1e6 * sum(x[0:N2]**2) + sum(x[N2:]**2) for x in X]
        return f if len(f) > 1 else f[0]
    def ellirot(self, x):
        return fcts.elli(array(x), 1)
    def hyperelli(self, x):
        N = len(x)
        return sum((np.arange(1, N + 1) * x)**2)
    def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
        """Ellipsoid test objective function"""
        if not np.isscalar(x[0]):  # parallel evaluation
            return [self.elli(xi, rot) for xi in x]  # could save 20% overall
        if rot:
            x = rotate(x)
        N = len(x)
        if actuator_noise:
            x = x + actuator_noise * np.random.randn(N)

        ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2)

        alpha = 0.49 + 1. / N
        beta = 1
        felli = np.random.rand(1)[0]**beta * ftrue * \
                max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
        # felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
        #         np.abs(np.random.randn(1)[0]))**0
        if both:
            return (felli, ftrue)
        else:
            # return felli  # possibly noisy value
            return ftrue  # + np.random.randn()
    def elliconstraint(self, x, cfac=1e8, tough=True, cond=1e6):
        """ellipsoid test objective function with "constraints" """
        N = len(x)
        f = sum(cond**(np.arange(N)[-1::-1] / (N - 1)) * x**2)
        cvals = (x[0] + 1,
                 x[0] + 1 + 100 * x[1],
                 x[0] + 1 - 100 * x[1])
        if tough:
            f += cfac * sum(max(0, c) for c in cvals)
        else:
            f += cfac * sum(max(0, c + 1e-3)**2 for c in cvals)
        return f
    def rosen(self, x, alpha=1e2):
        """Rosenbrock test objective function"""
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [sum(alpha * (x[:-1]**2 - x[1:])**2 + (1. - x[:-1])**2) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element list into scalar
    def diffpow(self, x, rot=0):
        """Diffpow test objective function"""
        N = len(x)
        if rot:
            x = rotate(x)
        return sum(np.abs(x)**(2. + 4. * np.arange(N) / (N - 1.)))**0.5
    def rosenelli(self, x):
        N = len(x)
        return self.rosen(x[:N // 2]) + self.elli(x[N // 2:], cond=1)
    def ridge(self, x, expo=2):
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [x[0] + 100 * np.sum(x[1:]**2)**(expo / 2.) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element list into scalar
    def ridgecircle(self, x, expo=0.5):
        """happy cat by HG Beyer"""
        a = len(x)
        s = sum(x**2)
        return ((s - a)**2)**(expo / 2) + s / a + sum(x) / a
    def happycat(self, x, alpha=1. / 8):
        s = sum(x**2)
        return ((s - len(x))**2)**alpha + (s / 2 + sum(x)) / len(x) + 0.5
    def flat(self, x):
        return 1
        # unreachable variants kept from experimentation:
        return 1 if np.random.rand(1) < 0.9 else 1.1
        return np.random.randint(1, 30)
    def branin(self, x):
        # in [0,15]**2
        y = x[1]
        x = x[0] + 5
        return (y - 5.1 * x**2 / 4 / np.pi**2 + 5 * x / np.pi - 6)**2 + 10 * (1 - 1 / 8 / np.pi) * np.cos(x) + 10 - 0.397887357729738160000
    def goldsteinprice(self, x):
        x1 = x[0]
        x2 = x[1]
        return (1 + (x1 + x2 + 1)**2 * (19 - 14 * x1 + 3 * x1**2 - 14 * x2 + 6 * x1 * x2 + 3 * x2**2)) * (
            30 + (2 * x1 - 3 * x2)**2 * (18 - 32 * x1 + 12 * x1**2 + 48 * x2 - 36 * x1 * x2 + 27 * x2**2)) - 3
    def griewank(self, x):
        # was in [-600 600]
        x = (600. / 5) * x
        return 1 - np.prod(np.cos(x / sqrt(1. + np.arange(len(x))))) + sum(x**2) / 4e3
    def rastrigin(self, x):
        """Rastrigin test objective function"""
        if not np.isscalar(x[0]):
            N = len(x[0])
            return [10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x]
            # return 10*N + sum(x**2 - 10*np.cos(2*np.pi*x), axis=1)
        N = len(x)
        return 10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x))
    def schaffer(self, x):
        """Schaffer function, x0 in [-100..100]"""
        N = len(x)
        s = x[0:N - 1]**2 + x[1:N]**2
        return sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1))

    def schwefelelli(self, x):
        s = 0
        f = 0
        for i in rglen(x):
            s += x[i]
            f += s**2
        return f
    def schwefelmult(self, x, pen_fac=1e4):
        """multimodal Schwefel function with domain -500..500"""
        y = [x] if np.isscalar(x[0]) else x
        N = len(y[0])
        f = array([418.9829 * N - 1.27275661e-5 * N - sum(x * np.sin(np.abs(x)**0.5))
                   + pen_fac * sum((abs(x) > 500) * (abs(x) - 500)**2) for x in y])
        return f if len(f) > 1 else f[0]
    def optprob(self, x):
        n = np.arange(len(x)) + 1
        f = n * x * (1 - x)**(n - 1)
        return sum(1 - f)
    def lincon(self, x, theta=0.01):
        """ridge-like linear function with one linear constraint"""
        if x[0] < 0:
            return np.NaN
        return theta * x[1] + x[0]
    def rosen_nesterov(self, x, rho=100):
        """needs an exponential number of steps in a non-increasing f-sequence.

        x_0 = (-1,1,...,1)
        See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function"

        """
        f = 0.25 * (x[0] - 1)**2
        f += rho * sum((x[1:] - 2 * x[:-1]**2 + 1)**2)
        return f
    def powel_singular(self, x):
        # ((8 * np.sin(7 * (x[i] - 0.9)**2)**2) + (6 * np.sin()))
        res = np.sum((x[i - 1] + 10 * x[i])**2 + 5 * (x[i + 1] - x[i + 2])**2 +
                     (x[i] - 2 * x[i + 1])**4 + 10 * (x[i - 1] - x[i + 2])**4
                     for i in xrange(1, len(x) - 2))
        return 1 + res
    def styblinski_tang(self, x):
        return (39.1661657037714171054273576010019 * len(x))**1 + sum(x**4 - 16 * x**2 + 5 * x) / 2
    def bukin(self, x):
        """Bukin function from Wikipedia, generalized simplistically from 2-D.

        http://en.wikipedia.org/wiki/Test_functions_for_optimization"""
        s = 0
        for k in range((1 + len(x)) // 2):
            z = x[2 * k]
            y = x[min((2 * k + 1, len(x) - 1))]
            s += 100 * np.abs(y - 0.01 * z**2)**0.5 + 0.01 * np.abs(z + 10)
        return s

fcts = FitnessFunctions()
Fcts = fcts  # for cross-compatibility, as if the functions were static members of class Fcts
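# For example, by construction the Rosenbrock function is zero at the
# all-ones vector and the sphere is zero at the origin:
#   fcts.rosen(np.ones(5))    # -> 0.0
#   fcts.sphere(np.zeros(5))  # -> 0.0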
def felli(x):
    """unbound test function, needed to test multiprocessor"""
    return sum(1e6**(np.arange(len(x)) / (len(x) - 1)) * (np.array(x, copy=False))**2)


# ____________________________________________
# ____________________________________________________________
def _test(module=None):  # None is fine when called from inside the module
    import doctest
    print(doctest.testmod(module))  # this is pretty cool!
def process_doctest_output(stream=None):
    """rudimentary processing of doctest output, collecting the failed
    examples together with their expected and actually received output"""
    import fileinput
    s1 = ""
    s2 = ""
    s3 = ""
    state = 0
    for line in fileinput.input(stream):  # takes argv as file or stdin
        if 1 < 3:
            s3 += line
            if state < -1 and line.startswith('***'):
                print(s3)
            if line.startswith('***'):
                s3 = ""

        if state == -1:  # found a failed example line
            s1 += '\n\n*** Failed Example:' + line
            s2 += '\n\n\n'  # line
            # state = 0  # wait for 'Expected:' line

        if line.startswith('Expected:'):
            state = 1
            continue
        elif line.startswith('Got:'):
            state = 2
            continue
        elif line.startswith('***'):  # marks end of failed example
            state = 0
        elif line.startswith('Failed example:'):
            state = -1
        elif line.startswith('Exception raised'):
            state = -2

        # in effect more else:
        if state == 1:
            s1 += line + ''
        if state == 2:
            s2 += line + ''

# ____________________________________________________________
# ____________________________________________________________
#
def main(argv=None):
    """to install and/or test from the command line use::

        python cma.py [options | func dim sig0 [optkey optval][optkey optval]...]

    with options being

    ``--test`` (or ``-t``) to run the doctest, ``--test -v`` to get (much) verbosity.

    ``install`` to install cma.py (uses setup from distutils.core).

    ``--doc`` for more info.

    Or start Python or (even better) ``ipython -pylab`` and::

        import cma
        cma.main('--test')
        help(cma)
        help(cma.fmin)
        res = fmin(cma.fcts.rosen, 10 * [0], 1)
        cma.plot()

    Examples
    ========
    Testing with the local python distribution from a command line
    in a folder where ``cma.py`` can be found::

        python cma.py --test

    And a single run on the Rosenbrock function::

        python cma.py rosen 10 1  # dimension initial_sigma
        python cma.py plot

    In the python shell::

        import cma
        cma.main('--test')

    """
    if argv is None:
        argv = sys.argv  # should better have been sys.argv[1:]
    else:
        if isinstance(argv, list):
            argv = ['python'] + argv  # see above
        else:
            argv = ['python'] + [argv]

    # uncomment for unit test
    # _test()
    # handle input arguments, getopt might be helpful ;-)
    if len(argv) >= 1:  # function and help
        if len(argv) == 1 or argv[1].startswith('-h') or argv[1].startswith('--help'):
            print(main.__doc__)
            fun = None
        elif argv[1].startswith('-t') or argv[1].startswith('--test'):
            import doctest
            if len(argv) > 2 and (argv[2].startswith('--v') or argv[2].startswith('-v')):  # verbose
                print('doctest for cma.py: due to different platforms and python versions')
                print('and in some cases due to a missing unique random seed')
                print('many examples will "fail". This is OK if they give a result similar')
                print('to the expected one and if no exception occurs. ')
                # if argv[1][2] == 'v':
                doctest.testmod(sys.modules[__name__], report=True)  # this is quite cool!
            else:  # was: if len(argv) > 2 and (argv[2].startswith('--qu') or argv[2].startswith('-q')):
                print('doctest for cma.py: launching...')  # not anymore: (it might be necessary to close the pop up window to finish)
                fn = '_cma_doctest_.txt'
                stdout = sys.stdout
                try:
                    with open(fn, 'w') as f:
                        sys.stdout = f
                        clock = ElapsedTime()
                        doctest.testmod(sys.modules[__name__], report=True)  # this is quite cool!
                        t_elapsed = clock()
                finally:
                    sys.stdout = stdout
                process_doctest_output(fn)
                # clean up
                try:
                    import os
                    for name in os.listdir('.'):
                        if (name.startswith('bound_method_FitnessFunctions.rosen_of_cma.FitnessFunctions_object_at_')
                                and name.endswith('.pkl')):
                            os.remove(name)
                except:
                    pass
                print('doctest for cma.py: finished (no other output should be seen after launching, more in file _cma_doctest_.txt)')
                print(' elapsed time [s]:', t_elapsed)
            return
        elif argv[1] == '--doc':
            print(__doc__)
            print(CMAEvolutionStrategy.__doc__)
            print(fmin.__doc__)
            fun = None
        elif argv[1] == '--fcts':
            print('List of valid function names:')
            print([d for d in dir(fcts) if not d.startswith('_')])
            fun = None
        elif argv[1] in ('install', '--install'):
            from distutils.core import setup
            setup(name="cma",
                  long_description=__doc__,
                  version=__version__.split()[0],
                  description="CMA-ES, Covariance Matrix Adaptation Evolution Strategy for non-linear numerical optimization in Python",
                  author="Nikolaus Hansen",
                  author_email="hansen at lri.fr",
                  maintainer="Nikolaus Hansen",
                  maintainer_email="hansen at lri.fr",
                  url="https://www.lri.fr/~hansen/cmaes_inmatlab.html#python",
                  license="MIT",
                  classifiers=[
                      "Intended Audience :: Science/Research",
                      "Intended Audience :: Education",
                      "Intended Audience :: Other Audience",
                      "Topic :: Scientific/Engineering",
                      "Topic :: Scientific/Engineering :: Mathematics",
                      "Topic :: Scientific/Engineering :: Artificial Intelligence",
                      "Operating System :: OS Independent",
                      "Programming Language :: Python :: 2.6",
                      "Programming Language :: Python :: 2.7",
                      "Programming Language :: Python :: 3",
                      "Development Status :: 4 - Beta",
                      "Environment :: Console",
                      "License :: OSI Approved :: MIT License",
                  ],
                  keywords=["optimization", "CMA-ES", "cmaes"],
                  py_modules=["cma"],
                  requires=["numpy"],
                  )
            fun = None
        elif argv[1] in ('plot',):
            plot(name=argv[2] if len(argv) > 2 else None)
            raw_input('press return')
            fun = None
        elif len(argv) > 3:
            fun = eval('fcts.' + argv[1])
        else:
            print('try -h option')
            fun = None

    if fun is not None:

        if len(argv) > 2:  # dimension
            x0 = np.ones(eval(argv[2]))
        if len(argv) > 3:  # sigma
            sig0 = eval(argv[3])

        opts = {}
        for i in xrange(5, len(argv), 2):
            opts[argv[i - 1]] = eval(argv[i])

        # run fmin
        if fun is not None:
            tic = time.time()
            fmin(fun, x0, sig0, opts)  # ftarget=1e-9, tolfacupx=1e9, verb_log=10
            # plot()
            # print ' best function value ', res[2]['es'].best[1]
            print('elapsed time [s]: %.2f' % round(time.time() - tic, 2))

    elif not len(argv):
        fmin(fcts.elli, np.ones(6) * 0.1, 0.1, {'ftarget': 1e-9})


# ____________________________________________________________
# ____________________________________________________________
#
# mainly for testing purposes
# executed when called from an OS shell
if __name__ == "__main__":
    # for i in range(1000):  # how to find the memory leak
    #     main(["cma.py", "rastrigin", "10", "5", "popsize", "200", "maxfevals", "24999", "verb_log", "0"])
    main()
|
raphaelvalentin/qtlayout
|
syntax/optimize/cma/cma-1.0.07/cma-1.0.07.py
|
Python
|
gpl-2.0
| 391,778
|
[
"exciting"
] |
7ff36957f19e7b40a1c5c1a95e5c15b1a0a9e723e9b6a9ab1e00283acb03bb53
|
#!/usr/bin/env python
""" refresh CS
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
res = gRefresher.forceRefresh()
if not res['OK']:
print(res['Message'])
|
yujikato/DIRAC
|
tests/Jenkins/dirac-refresh-cs.py
|
Python
|
gpl-3.0
| 354
|
[
"DIRAC"
] |
18244feceff47eebc63e27ce9bf3709c7142e1b3c6b102e0757e678f2877b35c
|
__author__ = 'williewonka'
import json
from splinter import Browser
import argparse
import sys
import openpyxl
parser = argparse.ArgumentParser(description='queries a given list of patents from a json object')
parser.add_argument('--janus', dest='janus', action='store_true', help='activates logging in from janus')
parser.set_defaults(janus=False)
parser.add_argument('--username', nargs='?', const=1, type=str, default='', help='username for janus proxy service')
parser.add_argument('--password', nargs='?', const=1, type=str, default='', help='password for janus proxy service')
parser.add_argument('--driver', nargs='?', const=1, type=str, default='firefox', help='browser driver: chrome/firefox, default firefox')
parser.add_argument('--downloadtype', nargs='?', const=1, type=str, default='patents', help='datatype of download: patents/citations')
parser.add_argument('--jsonfile', nargs='?', const=1, type=str, default='data_valid.json', help='specifies the json file with the patents')
JANUS, USERNAME, PASSWORD, DRIVER, DOWNLOADTYPE, JSON = parser.parse_args().janus, parser.parse_args().username, parser.parse_args().password, \
    parser.parse_args().driver, parser.parse_args().downloadtype, parser.parse_args().jsonfile
if DOWNLOADTYPE == 'patents':
    #get the data info
    inputdata = json.loads(open(JSON, 'r').readlines()[0])
elif DOWNLOADTYPE == 'citations':
    sheet = openpyxl.load_workbook('patentdata.xlsx').get_active_sheet()
    inputdata = []
    for cell in sheet.columns[4]:
        inputdata.append(str(cell.value))
    inputdata.pop(0)
else:
    sys.exit('wrong downloadtype argument')
results = {}  #dictionary that holds the number of found patents per company
#create a browser bot and visit the website
try:
    browser = Browser(DRIVER)
except:
    sys.exit('failed to load specified driver ' + DRIVER)
#go to the database
if JANUS:
    url = 'http://apps.webofknowledge.com.dianus.libr.tue.nl/DIIDW_AdvancedSearch_input.do?' \
          'SID=V2i7L6wGDEBBsnkAWFI&product=DIIDW&search_mode=AdvancedSearch'
    browser.visit(url)
    #this redirects to janus, fill in login info
    browser.fill('user', USERNAME)
    browser.fill('pass', PASSWORD)
    #find and click the login button
    browser.find_by_value('Login').first.click()
else:
    url = 'http://apps.webofknowledge.com/DIIDW_AdvancedSearch_input.do?SID=N1cpglrQOdCmC16gM44&product=DIIDW&search_mode=AdvancedSearch'
    browser.visit(url)
    #if a new session needs to be started, click the link
    try:
        browser.find_link_by_partial_text('new session').first.click()
    except:
        pass
def Build_Query_Citations(codes):
    #iterate through the list
    #build the query
    query = "CD=("
    for code in codes:
        if query == "CD=(":
            query += code
        else:
            query += " OR " + code
    query += ")"
    return query
def Build_Query_Patents(patents):
    #iterate through the patents
    #build the query
    query = "PN=("
    for patent in patents:
        if query == "PN=(":
            query += patent
        else:
            query += " OR " + patent
    query += ")"
    return query
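# For example, Build_Query_Patents(["US123", "EP456"]) returns the query
# string "PN=(US123 OR EP456)"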
def Execute_Query(patents, company, query):
    if browser.url != url:
        browser.visit(url)
    #fill in the query and click search
    browser.fill('value(input1)', query)
    browser.find_by_css('.searchButtons').first.click()
    #open the search results
    try:
        resultlink = browser.find_link_by_partial_href('summary.do').first
        if company != "":
            try:
                results[company]['resultsize'] += int(resultlink.value)
            except:
                results[company]['resultsize'] = int(resultlink.value)
        resultlink.click()
        #look for the number of pages in the result
        pages = int(browser.find_by_id('pageCount.top').value)
        for i in range(1, pages + 1):
            #select all patents
            browser.find_by_name('SelectPage').first.click()
            #add everything to the marked list
            browser.find_by_css(".addToMarkedList").first.click()
            #click on the next page button
            try:
                browser.find_by_css('.paginationNext').first.click()
            except:
                pass
        print("queried " + str(len(patents)) + " patents for company " + company)
    except Exception:  # no result link found on the page
        print('no patents found for ' + company)
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]
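# For example, list(chunks([1, 2, 3, 4, 5], 2)) yields [[1, 2], [3, 4], [5]]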
input()  # pause, presumably to finish any manual login in the browser before querying starts
if DOWNLOADTYPE == 'patents':
    for company in list(inputdata.keys()):
        patents = inputdata[company]
        results[company] = {
            'querysize': len(patents)
        }
        for list_of_patents in chunks(patents, 50):
            Execute_Query(list_of_patents, company, Build_Query_Patents(list_of_patents))
    print("parsing and saving results")
    JSON = json.dumps(results)
    stream = open('results.json', 'w')
    stream.writelines(JSON)
    stream.close()
    print('results saved')
elif DOWNLOADTYPE == 'citations':
    for list_of_codes in chunks(inputdata, 50):
        Execute_Query(list_of_codes, "", Build_Query_Citations(list_of_codes))
|
williewonka/USE-patents-project
|
querymaker.py
|
Python
|
gpl-2.0
| 5,222
|
[
"VisIt"
] |
e5613858d22fd9b22c8317f1ae98f5f1394711b11aa998dab4ce9f7448a1162b
|
# -*- coding: utf-8 -*-
try:
    # Python 2.7
    from collections import OrderedDict
except:
    # Python 2.6
    from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from gluon.validators import IS_NOT_EMPTY
from s3.s3fields import S3Represent
from s3.s3query import FS
from s3.s3utils import S3DateTime, s3_auth_user_represent_name, s3_avatar_represent
from s3.s3validators import IS_LOCATION_SELECTOR2, IS_ONE_OF
from s3.s3widgets import S3LocationSelectorWidget2
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
datetime_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
"""
Template settings for Requests Management
- for Philippines
"""
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate = ["Philippines", "demo/users"]
settings.base.system_name = T("Sahana")
settings.base.system_name_short = T("Sahana")
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users don't need to be approved
#settings.auth.registration_requires_approval = True
# Organisation links are either done automatically
# - by registering with official domain of Org
# or Manually by Call Center staff
#settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = False
# Uncomment this to allow Admin to see Organisations in user Admin even if the Registration doesn't request this
settings.auth.admin_sees_organisation = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
settings.auth.terms_of_service = True
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 5 # Apply Controller, Function and Table ACLs
settings.security.map = True
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = False
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "Philippines"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en", "English"),
# ("tl", "Tagalog"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "UTC +0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ["PH"]
# Until we add support to LocationSelector2 to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# -----------------------------------------------------------------------------
# Finance settings
settings.fin.currencies = {
"PHP" : T("Philippine Pesos"),
#"EUR" : T("Euros"),
#"GBP" : T("Great British Pounds"),
#"CHF" : T("Swiss Francs"),
"USD" : T("United States Dollars"),
}
settings.fin.currency_default = "PHP"
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# -----------------------------------------------------------------------------
# Summary Pages
settings.ui.summary = [#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
{"name": "charts",
"label": "Reports",
"widgets": [{"method": "report", "ajax_init": True}]
},
]
settings.search.filter_manager = False
# Filter forms - style for Summary pages
#def filter_formstyle(row_id, label, widget, comment, hidden=False):
# return DIV(label, widget, comment,
# _id=row_id,
# _class="horiz_filter_form")
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Human Resource Management
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an organisation
settings.hrm.org_required = False
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
settings.hrm.teams = False
# Uncomment to hide fields in S3AddPersonWidget[2]
settings.pr.request_dob = False
settings.pr.request_gender = False
# -----------------------------------------------------------------------------
# Org
#settings.org.site_label = "Office/Shelter/Hospital"
settings.org.site_label = "Site"
settings.org.site_autocomplete = True
# Extra fields to show in Autocomplete Representations
settings.org.site_autocomplete_fields = ["location_id$L1",
"location_id$L2",
"location_id$L3",
"location_id$L4",
]
# -----------------------------------------------------------------------------
# Project
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Links to Filtered Components for Donors & Partners
#settings.project.organisation_roles = {
# 1: T("Host National Society"),
# 2: T("Partner"),
# 3: T("Donor"),
# #4: T("Customer"), # T("Beneficiary")?
# #5: T("Supplier"),
# 9: T("Partner National Society"),
#}
# -----------------------------------------------------------------------------
# Notifications
# Template for the subject line in update notifications
#settings.msg.notify_subject = "$S %s" % T("Notification")
settings.msg.notify_subject = "$S Notification"
# -----------------------------------------------------------------------------
def currency_represent(v):
"""
Custom Representation of Currencies
"""
if v == "USD":
return "$"
elif v == "EUR":
return "€"
elif v == "GBP":
return "£"
else:
# e.g. CHF
return v
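# Usage sketch (illustrative only; not wired up here): hook this onto any
# currency field, e.g.
#   table.currency.represent = currency_represent
#   currency_represent("USD") # -> "$"
#   currency_represent("CHF") # -> "CHF" (returned unchanged)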
# -----------------------------------------------------------------------------
def render_contacts(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Contacts on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_human_resource.id"]
item_class = "thumbnail"
raw = record._row
#author = record["hrm_human_resource.modified_by"]
date = record["hrm_human_resource.modified_on"]
fullname = record["hrm_human_resource.person_id"]
job_title = raw["hrm_human_resource.job_title_id"] or ""
if job_title:
job_title = "- %s" % record["hrm_human_resource.job_title_id"]
#organisation = record["hrm_human_resource.organisation_id"]
organisation_id = raw["hrm_human_resource.organisation_id"]
#org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
pe_id = raw["pr_person.pe_id"]
person_id = raw["hrm_human_resource.person_id"]
location = record["org_site.location_id"]
location_id = raw["org_site.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"] or T("no office assigned")
email = raw["pr_email_contact.value"] or T("no email address")
if isinstance(email, list):
email = email[0]
phone = raw["pr_phone_contact.value"] or T("no phone number")
if isinstance(phone, list):
phone = phone[0]
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
query = (ltable.pe_id == pe_id)
row = db(query).select(ltable.user_id,
limitby=(0, 1)
).first()
if row:
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
avatar = s3_avatar_represent(row.user_id,
_class="media-object")
else:
avatar = IMG(_src=URL(c="static", f="img", args="blank-user.gif"),
_class="media-object")
# Edit Bar
permit = current.auth.s3_has_permission
table = db.pr_person
if permit("update", table, record_id=person_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_url = URL(c="hrm", f="person",
args=[person_id, "update.popup"],
vars=vars)
title_update = current.response.s3.crud_strings.hrm_human_resource.title_update
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=edit_url,
_class="s3_modal",
_title=title_update,
)
else:
edit_btn = ""
edit_url = "#"
title_update = ""
# Deletions failing due to Integrity Errors
#if permit("delete", table, record_id=person_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
#else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
avatar = A(avatar,
_href=edit_url,
_class="pull-left s3_modal",
_title=title_update,
)
# Render the item
body = TAG[""](P(fullname,
" ",
SPAN(job_title),
_class="person_pos",
),
P(I(_class="icon-phone"),
" ",
SPAN(phone),
" ",
I(_class="icon-envelope-alt"),
" ",
SPAN(email),
_class="card_1_line",
),
P(I(_class="icon-home"),
" ",
address,
_class="card_manylines",
))
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
# Organisation only needed if displaying elsewhere than org profile
# Author confusing with main contact record
#DIV(#author,
# #" - ",
# A(organisation,
# _href=org_url,
# _class="card-organisation",
# ),
# _class="card-person",
# ),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
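# This renderer is wired up as a datalist "list_layout" further down, e.g. in
# customise_hrm_human_resource_controller():
#   s3db.configure("hrm_human_resource",
#                  list_layout = render_contacts,
#                  )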
# -----------------------------------------------------------------------------
def quote_unicode(s):
"""
Quote unicode strings for URLs for Rocket
"""
chars = []
for char in s:
o = ord(char)
if o < 128:
chars.append(char)
else:
chars.append(hex(o).replace("0x", "%").upper())
return "".join(chars)
# -----------------------------------------------------------------------------
def render_locations(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = raw["gis_location.name"]
level = raw["gis_location.level"]
L1 = raw["gis_location.L1"]
L2 = raw["gis_location.L2"]
L3 = raw["gis_location.L3"]
L4 = raw["gis_location.L4"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
if level == "L1":
represent = name
if level == "L2":
represent = "%s (%s)" % (name, L1)
elif level == "L3":
represent = "%s (%s, %s)" % (name, L2, L1)
elif level == "L4":
represent = "%s (%s, %s, %s)" % (name, L3, L2, L1)
else:
# L0 or specific
represent = name
# Users don't edit locations
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars={"refresh": list_id,
# "record": record_id}),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Tallies
# NB We assume that all records are readable here
# Search all sub-locations
locations = current.gis.get_children(record_id)
locations = [l.id for l in locations]
locations.append(record_id)
db = current.db
s3db = current.s3db
stable = s3db.org_site
query = (stable.deleted == False) & \
(stable.location_id.belongs(locations))
count = stable.id.count()
row = db(query).select(count).first()
if row:
tally_sites = row[count]
else:
tally_sites = 0
table = s3db.req_req
query = (table.deleted == False) & \
(stable.site_id == table.site_id) & \
(stable.location_id.belongs(locations))
count = table.id.count()
row = db(query).select(count).first()
if row:
tally_reqs = row[count]
else:
tally_reqs = 0
table = s3db.req_commit
query = (table.deleted == False) & \
(table.location_id.belongs(locations))
count = table.id.count()
row = db(query).select(count).first()
if row:
tally_commits = row[count]
else:
tally_commits = 0
if level == "L4":
next_Lx = ""
next_Lx_label = ""
else:
if level == "L0":
next_Lx = "L1"
next_Lx_label = "Regions"
if level == "L1":
next_Lx = "L2"
next_Lx_label = "Provinces"
elif level == "L2":
next_Lx = "L3"
next_Lx_label = "Municipalities / Cities"
elif level == "L3":
next_Lx = "L4"
next_Lx_label = "Barangays"
table = db.gis_location
query = (table.deleted == False) & \
(table.level == next_Lx) & \
(table.parent == record_id)
count = table.id.count()
row = db(query).select(count).first()
if row:
tally_Lx = row[count]
else:
tally_Lx = 0
next_url = URL(c="gis", f="location",
args=["datalist"],
vars={"~.level": next_Lx,
"~.parent": record_id,
})
next_Lx_label = A(next_Lx_label,
_href=next_url,
)
next_Lx = SPAN(tally_Lx,
_class="badge",
)
# Build the icon, if it doesn't already exist
filename = "%s.svg" % record_id
import os
filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
if not os.path.exists(filepath):
gtable = db.gis_location
loc = db(gtable.id == record_id).select(gtable.wkt,
limitby=(0, 1)
).first()
        if loc and loc.wkt:
from s3.s3codecs.svg import S3SVG
S3SVG.write_file(filename, loc.wkt)
# Render the item
item = DIV(DIV(A(IMG(_class="media-object",
_src=URL(c="static",
f="cache",
args=["svg", filename],
)
),
_class="pull-left",
_href=location_url,
),
DIV(SPAN(A(represent,
_href=location_url,
_class="media-heading"
),
),
#edit_bar,
_class="card-header-select",
),
DIV(P(next_Lx_label,
next_Lx,
T("Sites"),
SPAN(tally_sites,
_class="badge",
),
T("Requests"),
SPAN(tally_reqs,
_class="badge",
),
T("Donations"),
SPAN(tally_commits,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
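# Wired up as the Lx selection page layout in
# customise_gis_location_controller() below:
#   s3db.configure("gis_location",
#                  list_layout = render_locations,
#                  )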
# -----------------------------------------------------------------------------
def render_locations_profile(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Profile Page
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = record["gis_location.name"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
# Placeholder to maintain style
#logo = DIV(IMG(_class="media-object"),
# _class="pull-left")
# We don't Edit Locations
# Edit Bar
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# vars = {"refresh": list_id,
# "record": record_id,
# }
# f = current.request.function
# if f == "organisation" and organisation_id:
# vars["(organisation)"] = organisation_id
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars=vars),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Render the item
item = DIV(DIV(DIV(#SPAN(A(name,
# _href=location_url,
# ),
# _class="location-title"),
#" ",
#edit_bar,
P(A(name,
_href=location_url,
),
_class="card_comments"),
_class="span5"), # card-details
_class="row",
),
)
return item
# -----------------------------------------------------------------------------
def render_sites(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Facilities on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_facility.id"]
item_class = "thumbnail"
raw = record._row
name = record["org_facility.name"]
site_id = raw["org_facility.id"]
opening_times = raw["org_facility.opening_times"] or ""
author = record["org_facility.modified_by"]
date = record["org_facility.modified_on"]
organisation = record["org_facility.organisation_id"]
organisation_id = raw["org_facility.organisation_id"]
location = record["org_facility.location_id"]
level = raw["gis_location.level"]
if level:
location_id = raw["org_facility.location_id"]
else:
location_id = raw["gis_location.parent"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"] or ""
phone = raw["org_facility.phone1"] or ""
facility_type = record["org_site_facility_type.facility_type_id"]
comments = record["org_facility.comments"] or ""
logo = raw["org_organisation.logo"]
site_url = URL(c="org", f="facility", args=[site_id, "profile"])
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
facility_status = raw["org_site_status.facility_status"] or ""
if facility_status:
if facility_status == 1:
icon = "thumbs-up-alt"
colour = "green"
elif facility_status == 2:
icon = "thumbs-down-alt"
colour = "amber"
elif facility_status == 3:
icon = "reply-all"
colour = "red"
elif facility_status == 4:
icon = "remove"
colour = "red"
        elif facility_status == 99:
            icon = "question"
            colour = ""
        else:
            # Unknown status code
            icon = "question"
            colour = ""
facility_status = P(#I(_class="icon-%s" % icon),
#" ",
SPAN("%s: %s" % (T("Status"), record["org_site_status.facility_status"])),
" ",
_class="card_1_line %s" % colour,
)
power_supply_type = raw["org_site_status.power_supply_type"] or ""
if power_supply_type:
if power_supply_type == 1:
icon = "thumbs-up-alt"
colour = "green"
elif power_supply_type == 2:
icon = "cogs"
colour = "amber"
elif power_supply_type == 98:
icon = "question"
colour = "amber"
        elif power_supply_type == 99:
            icon = "remove"
            colour = "red"
        else:
            # Unknown type code
            icon = "question"
            colour = ""
power_supply_type = P(#I(_class="icon-%s" % icon),
#" ",
SPAN("%s: %s" % (T("Power"), record["org_site_status.power_supply_type"])),
" ",
_class="card_1_line %s" % colour,
)
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.org_facility
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="facility",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_facility.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
body = TAG[""](P(I(_class="icon-flag"),
" ",
SPAN(facility_type),
" ",
_class="card_1_line",
),
P(I(_class="icon-home"),
" ",
address,
_class="card_manylines",
),
P(I(_class="icon-time"),
" ",
SPAN(opening_times),
" ",
_class="card_1_line",
),
P(I(_class="icon-phone"),
" ",
SPAN(phone),
" ",
_class="card_1_line",
),
facility_status,
power_supply_type,
P(comments,
_class="card_manylines s3-truncate",
),
)
item = DIV(DIV(SPAN(A(name,
_href=site_url,
),
_class="card-title",
),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(logo,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_organisations(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Organisations on the Stakeholder Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_organisation.id"]
item_class = "thumbnail span6" # span6 for 2 cols
raw = record._row
name = record["org_organisation.name"]
logo = raw["org_organisation.logo"]
phone = raw["org_organisation.phone"] or ""
website = raw["org_organisation.website"] or ""
if website:
website = A(website, _href=website)
money = raw["req_organisation_needs.money"]
if money:
money_details = record["req_organisation_needs.money_details"]
money_details = SPAN(XML(money_details),
_class="s3-truncate")
money_details = P(I(_class="icon icon-dollar"),
" ",
money_details,
_class="card_manylines",
)
else:
# Include anyway to make cards align
money_details = P(I(_class="icon icon-dollar"),
" ",
_class="card_1_line",
)
#time = raw["req_organisation_needs.vol"]
#if time:
# time_details = record["req_organisation_needs.vol_details"]
# time_details = P(I(_class="icon icon-time"),
# " ",
# XML(time_details),
# _class="card_1_line",
# )
#else:
# time_details = ""
org_url = URL(c="org", f="organisation", args=[record_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
db = current.db
permit = current.auth.s3_has_permission
table = db.org_organisation
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="organisation",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_organisation.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Tallies
# NB We assume that all records are readable here
s3db = current.s3db
stable = s3db.org_site
query = (stable.deleted == False) & \
(stable.obsolete == False) & \
(stable.organisation_id == record_id)
tally_sites = db(query).count()
table = s3db.req_req
query = (table.deleted == False) & \
(stable.site_id == table.site_id) & \
(stable.organisation_id == record_id)
tally_reqs = db(query).count()
table = s3db.req_commit
query = (table.deleted == False) & \
(table.organisation_id == record_id)
tally_commits = db(query).count()
# Render the item
item = DIV(DIV(logo,
DIV(SPAN(A(name,
_href=org_url,
_class="media-heading"
),
),
edit_bar,
_class="card-header-select",
),
DIV(P(I(_class="icon icon-phone"),
" ",
phone,
_class="card_1_line",
),
P(I(_class="icon icon-map"),
" ",
website,
_class="card_1_line",
),
money_details,
#time_details,
P(T("Sites"),
SPAN(tally_sites,
_class="badge",
),
T("Requests"),
SPAN(tally_reqs,
_class="badge",
),
T("Donations"),
SPAN(tally_commits,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_org_needs(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Needs
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["req_organisation_needs.id"]
item_class = "thumbnail"
raw = record._row
logo = raw["org_organisation.logo"]
phone = raw["org_organisation.phone"] or ""
website = raw["org_organisation.website"] or ""
if website:
website = A(website, _href=website)
author = record["req_organisation_needs.modified_by"]
date = record["req_organisation_needs.modified_on"]
money = raw["req_organisation_needs.money"]
if money:
money_details = record["req_organisation_needs.money_details"]
money_details = P(I(_class="icon icon-dollar"),
" ",
XML(money_details),
_class="card_manylines",
)
else:
money_details = ""
time = raw["req_organisation_needs.vol"]
if time:
time_details = record["req_organisation_needs.vol_details"]
time_details = P(I(_class="icon icon-time"),
" ",
XML(time_details),
_class="card_manylines",
)
else:
time_details = ""
org_id = raw["org_organisation.id"]
org_url = URL(c="org", f="organisation", args=[org_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
permit = current.auth.s3_has_permission
table = current.db.req_organisation_needs
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="req", f="organisation_needs",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.req_organisation_needs.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
if current.request.controller == "org":
# Org Profile page - no need to repeat Org Name
title = " "
else:
title = raw["org_organisation.name"]
# Render the item
item = DIV(DIV(SPAN(title, _class="card-title"),
SPAN(author, _class="location-title"),
SPAN(date, _class="date-title"),
edit_bar,
_class="card-header",
),
DIV(logo,
DIV(P(I(_class="icon icon-phone"),
" ",
phone,
_class="card_1_line",
),
P(I(_class="icon icon-map"),
" ",
website,
_class="card_1_line",
),
money_details,
time_details,
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
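# Expose the renderer for reuse elsewhere (this and the similar hooks below
# assume a module-level s3 = current.response.s3 binding earlier in this file)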
s3.render_org_needs = render_org_needs
# -----------------------------------------------------------------------------
def render_site_needs(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Needs
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["req_site_needs.id"]
item_class = "thumbnail"
raw = record._row
logo = raw["org_organisation.logo"]
addresses = raw["gis_location.addr_street"]
if addresses:
if isinstance(addresses, list):
address = addresses[0]
else:
address = addresses
else:
address = ""
    contact = raw["org_facility.contact"] or ""
opening_times = raw["org_facility.opening_times"] or ""
phone = raw["org_facility.phone1"] or ""
website = raw["org_organisation.website"] or ""
if website:
website = A(website, _href=website)
author = record["req_site_needs.modified_by"]
date = record["req_site_needs.modified_on"]
#goods = raw["req_site_needs.goods"]
#if goods:
# goods_details = record["req_site_needs.goods_details"]
# goods_details = P(I(_class="icon icon-truck"),
# " ",
# XML(goods_details),
# _class="card_1_line",
# )
#else:
# goods_details = ""
#time = raw["req_site_needs.vol"]
#if time:
# time_details = record["req_site_needs.vol_details"]
# time_details = P(I(_class="icon icon-time"),
# " ",
# XML(time_details),
# _class="card_1_line",
# )
#else:
# time_details = ""
site_url = URL(c="org", f="facility", args=[record_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=site_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
permit = current.auth.s3_has_permission
table = current.db.req_site_needs
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="req", f="site_needs",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.req_site_needs.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
if current.request.controller == "org":
# Site Profile page - no need to repeat Site Name
title = " "
else:
title = raw["org_facility.name"]
# Render the item
item = DIV(DIV(SPAN(title, _class="card-title"),
SPAN(author, _class="location-title"),
SPAN(date, _class="date-title"),
edit_bar,
_class="card-header",
),
DIV(logo,
DIV(#goods_details,
#time_details,
P(I(_class="icon icon-home"),
" ",
address,
_class="card_manylines",
),
P(I(_class="icon-time"),
" ",
SPAN(opening_times),
" ",
_class="card_1_line",
),
P(I(_class="icon icon-phone"),
" ",
phone,
_class="card_1_line",
),
P(I(_class="icon icon-map"),
" ",
website,
_class="card_1_line",
),
P(I(_class="icon icon-user"),
" ",
contact,
_class="card_1_line",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
s3.render_site_needs = render_site_needs
# -----------------------------------------------------------------------------
def customise_gis_location_controller(**attr):
"""
Customise gis_location controller
- Profile Page
"""
db = current.db
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive:
s3db = current.s3db
table = s3db.gis_location
if r.method == "datalist":
                # Lx selection page
                # 2-column datalist
                s3.dl_rowsize = 2
                # The default page length of 5 would trigger an AJAX call,
                # so load all rows by default
                s3.dl_pagelength = 17
level = current.request.get_vars.get("~.level", None)
if not level:
# Just show PH L1s
level = "L1"
s3.filter = (table.L0 == "Philippines") & (table.level == "L1")
parent = current.request.get_vars.get("~.parent", None)
if level == "L1":
s3.crud_strings["gis_location"].title_list = T("Regions")
elif level == "L2":
if parent:
parent = db(table.id == parent).select(table.name,
limitby=(0, 1)
).first().name
s3.crud_strings["gis_location"].title_list = T("Provinces in %s") % parent
else:
s3.crud_strings["gis_location"].title_list = T("Provinces")
elif level == "L3":
if parent:
parent = db(table.id == parent).select(table.name,
limitby=(0, 1)
).first().name
s3.crud_strings["gis_location"].title_list = T("Municipalities and Cities in %s") % parent
else:
s3.crud_strings["gis_location"].title_list = T("Municipalities and Cities")
elif level == "L4":
if parent:
parent = db(table.id == parent).select(table.name,
limitby=(0, 1)
).first().name
s3.crud_strings["gis_location"].title_list = T("Barangays in %s") % parent
else:
s3.crud_strings["gis_location"].title_list = T("Barangays")
list_fields = ["name",
"level",
"L1",
"L2",
"L3",
"L4",
]
s3db.configure("gis_location",
filter_widgets = None,
list_fields = list_fields,
list_layout = render_locations,
)
elif r.method == "profile":
# Customise tables used by widgets
#customise_hrm_human_resource_fields()
customise_org_facility_fields()
s3db.req_customise_req_fields()
s3db.req_customise_commit_fields()
# gis_location table (Sub-Locations)
table.parent.represent = s3db.gis_LocationRepresent(sep=" | ")
list_fields = ["name",
"id",
]
location = r.record
record_id = location.id
# Override context as that's a Path
default = "~.(location)=%s" % record_id
map_widget = dict(label = "Map",
type = "map",
context = "location",
icon = "icon-map",
height = 383,
width = 568,
bbox = {"lat_max" : location.lat_max,
"lon_max" : location.lon_max,
"lat_min" : location.lat_min,
"lon_min" : location.lon_min
},
)
#locations_widget = dict(label = "Locations",
# insert = False,
# #label_create = "Create Location",
# type = "datalist",
# tablename = "gis_location",
# context = "location",
# icon = "icon-globe",
# # @ToDo: Show as Polygons?
# show_on_map = False,
# list_layout = render_locations_profile,
# )
#needs_widget = dict(label = "Needs",
# label_create = "Add New Need",
# type = "datalist",
# tablename = "req_site_needs",
# context = "location",
# icon = "icon-hand-up",
# multiple = False,
# # Would just show up on Sites
# show_on_map = False,
# list_layout = render_site_needs,
# )
reqs_widget = dict(label = "Requests",
label_create = "Add New Request",
type = "datalist",
tablename = "req_req",
context = "location",
default = default,
filter = FS("req_status").belongs([0, 1]),
icon = "icon-flag",
layer = "Requests",
# provided by Catalogue Layer
#marker = "request",
list_layout = s3db.req_req_list_layout,
)
commits_widget = dict(label = "Donations",
label_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "location",
default = default,
filter = FS("cancel") == False,
icon = "icon-truck",
show_on_map = False,
#layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = s3db.req_commit_list_layout,
)
#resources_widget = dict(label = "Resources",
# label_create = "Create Resource",
# type = "datalist",
# tablename = "org_resource",
# context = "location",
# default = default,
# #filter = FS("req_status").belongs([0, 1]),
# icon = "icon-wrench",
# layer = "Resources",
# # provided by Catalogue Layer
# #marker = "resource",
# list_layout = s3db.org_resource_list_layout,
# )
sites_widget = dict(label = "Sites",
label_create = "Add New Site",
type = "datalist",
tablename = "org_facility",
context = "location",
default = default,
filter = FS("obsolete") == False,
icon = "icon-home",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_sites,
)
# Build the icon, if it doesn't already exist
filename = "%s.svg" % record_id
import os
filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
if not os.path.exists(filepath):
gtable = db.gis_location
loc = db(gtable.id == record_id).select(gtable.wkt,
limitby=(0, 1)
).first()
if loc and loc.wkt:
from s3.s3codecs.svg import S3SVG
S3SVG.write_file(filename, loc.wkt)
if current.auth.s3_has_permission("update", table, record_id=record_id):
edit_btn = A(I(_class="icon icon-edit"),
_href=URL(c="gis", f="location",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["gis_location"].title_update,
)
else:
edit_btn = ""
name = location.name
s3db.configure("gis_location",
list_fields = list_fields,
profile_title = "%s : %s" % (s3.crud_strings["gis_location"].title_list,
name),
profile_header = DIV(edit_btn,
A(IMG(_class="media-object",
_src=URL(c="static",
f="cache",
args=["svg", filename],
),
),
_class="pull-left",
#_href=location_url,
),
H2(name),
_class="profile-header",
),
profile_widgets = [reqs_widget,
map_widget,
commits_widget,
#resources_widget,
sites_widget,
#locations_widget,
],
)
return True
s3.prep = custom_prep
return attr
settings.customise_gis_location_controller = customise_gis_location_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_fields():
"""
Customise hrm_human_resource for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.hrm_human_resource
table.site_id.represent = S3Represent(lookup="org_site")
s3db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
#table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["person_id",
"person_id$pe_id",
"organisation_id",
"site_id$location_id",
"site_id$location_id$addr_street",
"job_title_id",
"email.value",
"phone.value",
#"modified_by",
"modified_on",
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
"""
Customise hrm_human_resource controller
- used for 'more' popups
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "datalist":
customise_hrm_human_resource_fields()
current.s3db.configure("hrm_human_resource",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_contacts,
)
return True
s3.prep = custom_prep
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
table = current.s3db.hrm_job_title
# Configure fields
field = table.organisation_id
field.readable = field.writable = False
field.default = None
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("hrm_job_title")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("hrm_job_title")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def customise_org_facility_fields():
"""
Customise org_facility for Profile widgets and 'more' popups
"""
# Truncate comments fields
from s3.s3utils import s3_trunk8
s3_trunk8(lines=2)
s3db = current.s3db
tablename = "org_facility"
table = s3db.org_facility
table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
field = table.comments
field.represent = lambda body: XML(s3_URLise(body))
field.comment = None
table.phone1.label = T("Phone")
# CRUD strings
ADD_FAC = T("Add Site")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_FAC,
title_display = T("Site Details"),
title_list = T("Sites"),
title_update = T("Edit Site"),
label_list_button = T("List Sites"),
label_delete_button = T("Delete Site"),
msg_record_created = T("Site Added"),
msg_record_modified = T("Site Updated"),
msg_record_deleted = T("Site Canceled"),
msg_list_empty = T("No Sites registered"))
list_fields = ["name",
"code",
"site_facility_type.facility_type_id",
"organisation_id",
"location_id",
"location_id$addr_street",
"location_id$level",
"location_id$parent",
"modified_by",
"modified_on",
"organisation_id$logo",
"opening_times",
"human_resource.person_id",
#"contact",
"phone1",
"status.facility_status",
"status.power_supply_type",
"comments",
]
crud_form = S3SQLCustomForm("name",
"code",
S3SQLInlineComponentMultiSelectWidget(
"facility_type",
label = T("Facility Type"),
field = "facility_type_id",
widget = "multiselect",
),
"organisation_id",
"location_id",
"opening_times",
# This is too Ugly right now!
#S3SQLInlineComponent(
# "human_resource_site",
# label = T("Focal Point"),
# field = ["human_resource_id"],
# multiple = False,
#),
#"contact",
"phone1",
# This is too Ugly right now!
#S3SQLInlineComponent(
# "needs",
# label = T("Needs"),
# multiple = False,
#),
S3SQLInlineComponent(
"status",
label = T("Status"),
multiple = False,
),
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_facility
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive:
customise_org_facility_fields()
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
location_id = get_vars.get("~.(location)", None)
organisation_id = get_vars.get("~.(organisation)", None)
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
location_field = table.location_id
if location_id:
location_field.default = location_id
location_field.readable = location_field.writable = False
else:
# Don't add new Locations here
location_field.comment = None
location_field.requires = IS_LOCATION_SELECTOR2(levels=levels)
location_field.widget = S3LocationSelectorWidget2(levels=levels,
show_address=True,
show_map=True)
# @ToDo: Proper button if we want this & amend functionality for Bootstrap)
#s3.cancel = True
if r.method == "datalist":
# Site selection page
# 2-column datalist, 6 rows per page
#s3.dl_pagelength = 12
#s3.dl_rowsize = 2
from s3.s3filter import S3TextFilter, S3OptionsFilter, S3LocationFilter
filter_widgets = [
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter(name = "type",
label = T("Type"),
field="site_facility_type.facility_type_id",
hidden = True,
),
S3OptionsFilter(name = "status",
label = T("Status"),
field = "status.facility_status",
hidden = True,
),
S3OptionsFilter(name = "power",
label = T("Power Supply"),
field = "status.power_supply_type",
hidden = True,
),
]
#get_vars = current.request.get_vars
#goods = get_vars.get("needs.goods", None)
#vol = get_vars.get("needs.vol", None)
#if goods:
# needs_fields = ["needs.goods_details"]
# s3.crud_strings["org_facility"].title_list = T("Sites where you can Drop-off Goods")
#elif vol:
# needs_fields = ["needs.vol_details"]
# s3.crud_strings["org_facility"].title_list = T("Sites where you can Volunteer your time")
#else:
# yesno = {True: T("Yes"), False: T("No")}
# needs_fields = ["needs.goods_details", "needs.vol_details"]
# filter_widgets.insert(0, S3OptionsFilter("needs.goods",
# label = T("Drop-off Goods"),
# cols = 2,
# options = yesno,
# multiple = False,
# hidden = True,
# ))
# filter_widgets.insert(1, S3OptionsFilter("needs.vol",
# label = T("Volunteer Time"),
# cols = 2,
# options = yesno,
# multiple = False,
# hidden = True,
# ))
filter_widgets.insert(0, S3TextFilter(["name",
"code",
"comments",
], #+ needs_fields,
label = T("Search")))
s3db.configure("org_facility",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_sites,
filter_widgets = filter_widgets,
)
elif r.method == "profile":
# Customise tables used by widgets
customise_hrm_human_resource_fields()
customise_site_needs_fields(profile=True)
s3db.req_customise_req_fields()
list_fields = ["name",
"id",
]
record = r.record
record_id = record.id
# @ToDo: Center on the Site
map_widget = dict(label = "Map",
type = "map",
context = "site",
icon = "icon-map",
height = 383,
width = 568,
)
contacts_widget = dict(label = "Contacts",
label_create = "Create Contact",
type = "datalist",
tablename = "hrm_human_resource",
context = "site",
create_controller = "pr",
create_function = "person",
icon = "icon-contact",
show_on_map = False, # Since they will show within Sites
list_layout = render_contacts,
)
reqs_widget = dict(label = "Requests",
label_create = "Add New Request",
type = "datalist",
tablename = "req_req",
context = "site",
filter = FS("req_status").belongs([0, 1]),
icon = "icon-flag",
show_on_map = False, # Since they will show within Sites
list_layout = s3db.req_req_list_layout,
)
commits_widget = dict(label = "Donations",
#label_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "site",
filter = FS("cancel") == False,
icon = "icon-truck",
show_on_map = False,
#layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = s3db.req_commit_list_layout,
)
if current.auth.s3_has_permission("update", table, record_id=record_id):
edit_btn = A(I(_class = "icon icon-edit"),
_href=URL(c="org", f="facility",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["org_facility"].title_update,
)
else:
edit_btn = ""
name = record.name
code = record.code
if code:
name_code = "%s - %s" % (name, code)
else:
                    name_code = name
location = table.location_id.represent(record.location_id)
organisation_id = record.organisation_id
db = current.db
otable = db.org_organisation
query = (otable.id == organisation_id)
org = db(query).select(otable.name,
otable.logo,
limitby=(0, 1)).first()
if org and org.logo:
logo = URL(c="default", f="download", args=[org.logo])
else:
# @ToDo: Placeholder
logo = "#"
# Add primary resource to map
# Lookup Marker (type-dependent)
ftable = s3db.org_facility
ltable = s3db.org_site_facility_type
                query = (ftable.id == record_id) & \
(ftable.site_id == ltable.site_id)
facility_type = db(query).select(ltable.facility_type_id,
limitby = (0, 1)
).first()
# Lookup Marker
if facility_type:
                    layer_filter = "facility_type.facility_type_id=%s" % \
                                   facility_type.facility_type_id
else:
layer_filter = ""
marker = current.gis.get_marker(controller = "org",
function = "facility",
filter = layer_filter)
lat = None
lon = None
gtable = s3db.gis_location
query = (r.id == ftable.id) & \
(ftable.location_id == gtable.id)
lat_lon = db(query).select(gtable.lat,
gtable.lon,
limitby = (0,1)).first()
if lat_lon:
lat = lat_lon["gis_location.lat"]
lon = lat_lon["gis_location.lon"]
map_widget["lat"] = lat
map_widget["lon"] = lon
tablename = "org_facility"
layer = dict(name = record.name,
id = "profile-header-%s-%s" % (tablename, record_id),
active = True,
tablename = r.tablename,
url = "/%s/org/facility.geojson?facility.id=%s" % \
(r.application, record_id),
marker = marker,
)
s3db.configure(tablename,
list_fields = list_fields,
profile_title = "%s : %s" % (s3.crud_strings["org_facility"].title_list,
name),
profile_header = DIV(edit_btn,
IMG(_class="media-object",
_src=logo,
),
H2(name),
record.code and P(record.code) or "",
P(I(_class="icon-sitemap"),
" ",
SPAN(org and org.name or current.messages.NONE),
" ",
_class="card_1_line",
),
P(I(_class="icon-globe"),
" ",
SPAN(location),
" ",
_class="card_1_line",
),
P(record.comments,
_class="s3-truncate"),
_class="profile-header",
),
profile_layers = [layer],
profile_widgets = [reqs_widget,
map_widget,
commits_widget,
contacts_widget,
],
)
if r.interactive or r.representation == "aadata":
# Configure fields
#table.code.readable = table.code.writable = False
#table.phone1.readable = table.phone1.writable = False
table.phone2.readable = table.phone2.writable = False
table.email.readable = table.email.writable = False
elif r.representation == "geojson":
# Don't represent facility_status, but just show integers
s3db.org_site_status.facility_status.represent = None
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
if isinstance(output, dict) and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="org", f="facility",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Add New Site"),
)
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="org", f="facility",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="org", f="facility",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="org", f="facility",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("org_facility")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("org_facility")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
# @ToDo: Don't just hide but prevent building
#attr["rheader"] = None
return attr
settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_needs_fields(profile=False):
# Truncate details field(s)
from s3.s3utils import s3_trunk8
s3_trunk8(lines=2)
s3db = current.s3db
table = s3db.req_organisation_needs
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
table.vol.readable = table.vol.writable = False
table.vol_details.readable = table.vol_details.writable = False
# Hide money_details unless used
    s3 = current.response.s3
    s3.jquery_ready.append(
'''$('#req_organisation_needs_money_details__row').hide()
$('#req_organisation_needs_money').change(function(){
$('#req_organisation_needs_money_details__row').toggle($(this).prop('checked'))
}).change()''')
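    # NB The trailing .change() fires the handler once on page load, so the
    # details row visibility matches the checkbox's initial state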
list_fields = ["id",
"organisation_id",
# @ToDo: Are these better displayed elsewhere in Profile view?
"organisation_id$logo",
"organisation_id$phone",
"organisation_id$website",
"money",
"money_details",
#"vol",
#"vol_details",
"modified_on",
"modified_by",
]
if not profile:
list_fields += ["organisation_id$name",
]
s3db.configure("req_organisation_needs",
list_fields=list_fields,
)
return
# -----------------------------------------------------------------------------
def customise_req_organisation_needs_controller(**attr):
"""
Customise req_organisation_needs controller
"""
customise_org_needs_fields()
return attr
settings.customise_req_organisation_needs_controller = customise_req_organisation_needs_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
"""
Customise org_organisation controller
- Profile Page
- Requests
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive or r.representation == "aadata":
# Load normal Model
s3db = current.s3db
table = s3db.org_organisation
list_fields = ["id",
"name",
"logo",
"phone",
"website",
"needs.money",
"needs.money_details",
#"needs.vol",
#"needs.vol_details",
]
if r.method == "profile":
# Customise tables used by widgets
customise_hrm_human_resource_fields()
customise_org_facility_fields()
customise_org_needs_fields(profile=True)
s3db.org_customise_org_resource_fields("profile")
contacts_widget = dict(label = "Contacts",
label_create = "Create Contact",
type = "datalist",
tablename = "hrm_human_resource",
context = "organisation",
create_controller = "pr",
create_function = "person",
icon = "icon-contact",
show_on_map = False, # Since they will show within Offices
list_layout = render_contacts,
)
map_widget = dict(label = "Map",
type = "map",
context = "organisation",
icon = "icon-map",
height = 383,
width = 568,
)
needs_widget = dict(label = "Needs",
label_create = "Add New Need",
type = "datalist",
tablename = "req_organisation_needs",
multiple = False,
context = "organisation",
icon = "icon-hand-up",
show_on_map = False,
list_layout = render_org_needs,
)
reqs_widget = dict(label = "Requests",
label_create = "Add New Request",
type = "datalist",
tablename = "req_req",
context = "organisation",
filter = FS("req_status").belongs([0, 1]),
icon = "icon-flag",
layer = "Requests",
# provided by Catalogue Layer
#marker = "request",
list_layout = s3db.req_req_list_layout,
)
#resources_widget = dict(label = "Resources",
# label_create = "Create Resource",
# type = "datalist",
# tablename = "org_resource",
# context = "organisation",
# #filter = FS("req_status").belongs([0, 1]),
# icon = "icon-wrench",
# layer = "Resources",
# # provided by Catalogue Layer
# #marker = "resource",
# list_layout = s3db.org_resource_list_layout,
# )
commits_widget = dict(label = "Donations",
#label_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "organisation",
filter = FS("cancel") == False,
icon = "icon-truck",
show_on_map = False,
#layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = s3db.req_commit_list_layout,
)
sites_widget = dict(label = "Sites",
label_create = "Add New Site",
type = "datalist",
tablename = "org_facility",
context = "organisation",
filter = FS("obsolete") == False,
icon = "icon-home",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_sites,
)
record = r.record
record_id = record.id
if current.auth.s3_has_permission("update", table, record_id=record_id):
edit_btn = A(I(_class = "icon icon-edit"),
_href=URL(c="org", f="organisation",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["org_organisation"].title_update,
)
else:
edit_btn = ""
s3db.configure("org_organisation",
profile_title = "%s : %s" % (s3.crud_strings["org_organisation"].title_list,
record.name),
profile_header = DIV(edit_btn,
IMG(_class="media-object",
_src=URL(c="default", f="download",
args=[record.logo]),
),
H2(record.name),
_class="profile-header",
),
profile_widgets = [reqs_widget,
map_widget,
# @ToDo: Move to profile_header
#needs_widget,
#resources_widget,
commits_widget,
needs_widget,
contacts_widget,
sites_widget,
]
)
elif r.method == "datalist":
# Stakeholder selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
from s3.s3filter import S3TextFilter, S3OptionsFilter
filter_widgets = [
                    # Text & needs filters are inserted below
]
# Needs page
# Truncate details field(s)
from s3.s3utils import s3_trunk8
s3_trunk8(lines=2)
get_vars = current.request.get_vars
money = get_vars.get("needs.money", None)
#vol = get_vars.get("needs.vol", None)
if money:
needs_fields = ["needs.money_details"]
s3.crud_strings["org_organisation"].title_list = T("Organizations soliciting Money")
#elif vol:
# needs_fields = ["needs.vol_details"]
# s3.crud_strings["org_organisation"].title_list = T("Organizations with remote Volunteer opportunities")
else:
yesno = {True: T("Yes"), False: T("No")}
needs_fields = ["needs.money_details", "needs.vol_details"]
filter_widgets.insert(0, S3OptionsFilter("needs.money",
options = yesno,
multiple = False,
cols = 2,
hidden = True,
))
#filter_widgets.insert(1, S3OptionsFilter("needs.vol",
# options = yesno,
# multiple = False,
# cols = 2,
# hidden = True,
# ))
filter_widgets.insert(0, S3TextFilter(["name",
"acronym",
"website",
"comments",
] + needs_fields,
label = T("Search")))
                    # Load the needs model so the "needs." selectors resolve
                    ntable = s3db.req_organisation_needs
s3db.configure("org_organisation",
filter_widgets = filter_widgets
)
# Represent used in rendering
current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent
# Hide fields
field = s3db.org_organisation_organisation_type.organisation_type_id
field.readable = field.writable = False
table.region_id.readable = table.region_id.writable = False
table.country.readable = table.country.writable = False
table.year.readable = table.year.writable = False
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="organisation", args="datalist")
s3db.configure("org_organisation",
create_next = url_next,
delete_next = url_next,
update_next = url_next,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_fields = list_fields,
list_layout = render_organisations,
)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive and \
isinstance(output, dict) and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="org", f="organisation",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Create Organization"),
)
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_site_needs_fields(profile=False):
s3db = current.s3db
table = s3db.req_site_needs
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["id",
"organisation_id$id",
# @ToDo: Are these better displayed elsewhere in Profile view?
"organisation_id$name",
"organisation_id$logo",
"organisation_id$website",
"location_id$L1",
"location_id$L2",
"location_id$L3",
"location_id$L4",
"location_id$addr_street",
"phone1",
#"goods",
#"goods_details",
#"vol",
#"vol_details",
"modified_on",
"modified_by",
]
if not profile:
list_fields += ["site_id$name"]
s3db.configure("req_site_needs",
list_fields=list_fields,
)
return
s3.customise_site_needs_fields = customise_site_needs_fields
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3db = current.s3db
request = current.request
s3 = current.response.s3
tablename = "pr_person"
table = s3db.pr_person
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "validate":
# Can't validate image without the file
image_field = s3db.pr_image.image
image_field.requires = None
if r.interactive or r.representation == "aadata":
if request.controller != "default":
# CRUD Strings
ADD_CONTACT = T("Create Contact")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Contact"),
title_display = T("Contact Details"),
title_list = T("Contact Directory"),
title_update = T("Edit Contact Details"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
htable = s3db.hrm_human_resource
htable.organisation_id.widget = None
site_field = htable.site_id
represent = S3Represent(lookup="org_site")
site_field.represent = represent
site_field.requires = IS_ONE_OF(current.db, "org_site.site_id",
represent,
orderby = "org_site.name")
from s3layouts import S3AddResourceLink
site_field.comment = S3AddResourceLink(c="org", f="facility",
vars={"child": "site_id"},
label=T("Add New Site"),
title=T("Site"),
tooltip=T("If you don't see the Site in the list, you can add a new one by clicking link 'Add New Site'."))
# ImageCrop widget doesn't currently work within an Inline Form
s3db.pr_image.image.widget = None
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
"site_contact",
]
if r.method in ("create", "update"):
                # Context from a Profile page?
organisation_id = request.get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
s3_sql_custom_fields = [
"first_name",
#"middle_name",
"last_name",
S3SQLInlineComponent(
"human_resource",
name = "human_resource",
label = "",
multiple = False,
fields = hr_fields,
),
S3SQLInlineComponent(
"image",
name = "image",
label = T("Photo"),
multiple = False,
fields = [("", "image")],
filterby = dict(field = "profile",
options = [True]
)
),
]
list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
"first_name",
#"middle_name",
"last_name",
(T("Job Title"), "human_resource.job_title_id"),
(T("Site"), "human_resource.site_id"),
(T("Site Contact"), "human_resource.site_contact"),
]
# Don't include Email/Phone for unauthenticated users
if current.auth.is_logged_in():
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "SMS")),
)
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL")),
)
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
if r.id and request.controller == "default":
url_next = URL(c="default", f="person", args=[r.id, "read"])
else:
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="pr", f="person")
s3db.configure(tablename,
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
list_fields = list_fields,
# Don't include a Create form in 'More' popups
listadd = False if r.method=="datalist" else True,
list_layout = render_contacts,
update_next = url_next,
)
# Move fields to their desired Locations
        # Disabled as it breaks submission of the inline component
#i18n = []
#iappend = i18n.append
#iappend('''i18n.office="%s"''' % T("Office"))
#iappend('''i18n.organisation="%s"''' % T("Organization"))
#iappend('''i18n.job_title="%s"''' % T("Job Title"))
#i18n = '''\n'''.join(i18n)
#s3.js_global.append(i18n)
#s3.scripts.append('/%s/static/themes/DRMP/js/contacts.js' % request.application)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
output["rheader"] = ""
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="pr", f="person",
args=["[id]", "read"]))
]
# All users just get "Open"
#db = current.db
#auth = current.auth
#has_permission = auth.s3_has_permission
#ownership_required = auth.permission.ownership_required
#s3_accessible_query = auth.s3_accessible_query
#if has_permission("update", table):
# action = dict(label=str(T("Edit")),
# _class="action-btn",
# url=URL(c="pr", f="person",
# args=["[id]", "update"]),
# )
# if ownership_required("update", table):
# # Check which records can be updated
# query = s3_accessible_query("update", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
#if has_permission("delete", table):
# action = dict(label=str(T("Delete")),
# _class="action-btn",
# url=URL(c="pr", f="person",
# args=["[id]", "delete"]),
# )
# if ownership_required("delete", table):
# # Check which records can be deleted
# query = s3_accessible_query("delete", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
s3.actions = actions
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_doc_document_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
tablename = "doc_document"
table = s3db.doc_document
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
# Filter Out Docs from Newsfeed
current.response.s3.filter = (table.name != None)
if r.interactive:
s3.crud_strings[tablename] = Storage(
label_create = T("Add Document"),
title_display = T("Document"),
title_list = T("Documents"),
title_update = T("Edit Document"),
label_list_button = T("List New Documents"),
label_delete_button = T("Remove Documents"),
msg_record_created = T("Documents added"),
msg_record_modified = T("Documents updated"),
msg_record_deleted = T("Documents removed"),
msg_list_empty = T("No Documents currently recorded"))
# Force added docs to have a name
table.name.requires = IS_NOT_EMPTY()
list_fields = ["name",
"file",
"url",
"organisation_id",
"comments",
]
crud_form = S3SQLCustomForm(*list_fields)
s3db.configure(tablename,
list_fields = list_fields,
crud_form = crud_form,
)
return True
s3.prep = custom_prep
return attr
settings.customise_doc_document_controller = customise_doc_document_controller
# -----------------------------------------------------------------------------
settings.req.req_type = ["Other"]
settings.req.requester_label = "Contact"
# The User Account logging the Request is NOT normally the Requester
settings.req.requester_is_author = False
# Have Donations include a 'Value' field
settings.req.commit_value = True
# Uncomment if the User Account logging the Commitment is NOT normally the Committer
#settings.req.committer_is_author = False
# Uncomment to allow Donations to be made without a matching Request
#settings.req.commit_without_request = True
# Set the Requester as being an HR for the Site if no HR record yet & as Site contact if none yet exists
settings.req.requester_to_site = True
def customise_req_req_controller(**attr):
s3 = current.response.s3
# Custom PreP
#standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
#if callable(standard_prep):
# result = standard_prep(r)
s3db = current.s3db
if r.component_name == "commit":
s3db.req_customise_commit_fields()
else:
s3db.req_customise_req_fields()
if r.method in ("datalist", "datalist.dl"):
s3.filter = (r.table.req_status.belongs([0, 1]))
elif r.method == "profile":
# Customise tables used by widgets
s3db.req_customise_commit_fields()
customise_org_facility_fields()
record = r.record
record_id = record.id
commits_widget = dict(label = "Donations",
label_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "request",
default = "req_id=%s" % record_id,
filter = FS("cancel") == False,
icon = "icon-truck",
show_on_map = False,
#layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = s3db.req_commit_list_layout,
)
filter = (FS("obsolete") == False)
sites_widget = dict(label = "Sites",
#label_create = "Add New Site",
type = "datalist",
tablename = "org_facility",
multiple = False,
context = "request",
filter = filter,
icon = "icon-home",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_sites,
)
if current.auth.s3_has_permission("update", r.table, record_id=record_id):
edit_btn = A(I(_class = "icon icon-edit"),
_href=URL(c="req", f="req",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["req_req"].title_update,
)
else:
edit_btn = ""
db = current.db
stable = db.org_site
query = (stable.site_id == record.site_id)
site = db(query).select(stable.name,
stable.location_id,
stable.organisation_id,
limitby=(0, 1)
).first()
location = s3db.gis_LocationRepresent(sep=" | ")(site.location_id)
otable = db.org_organisation
org = db(otable.id == site.organisation_id).select(otable.name,
otable.logo,
limitby=(0, 1)
).first()
if org and org.logo:
logo = URL(c="default", f="download", args=[org.logo])
else:
# @ToDo: Placeholder
logo = "#"
s3db.configure("req_req",
profile_title = s3.crud_strings["req_req"].title_list,
profile_header = DIV(edit_btn,
A(IMG(_class="media-object",
_src=logo,
),
_class="pull-left",
#_href=org_url,
),
H2(site.name),
P(I(_class="icon-sitemap"),
" ",
SPAN(org and org.name or current.messages.NONE),
" ",
_class="card_1_line",
),
P(I(_class="icon-globe"),
" ",
SPAN(location),
" ",
_class="card_1_line",
),
P(record.purpose,
_class="s3-truncate"),
_class="profile-header",
),
profile_widgets = [commits_widget,
sites_widget,
],
)
return True
s3.prep = custom_prep
# Disable postp
s3.postp = None
return attr
settings.customise_req_req_controller = customise_req_req_controller
# -----------------------------------------------------------------------------
def customise_req_commit_controller(**attr):
s3 = current.response.s3
# Custom PreP
#standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
#if callable(standard_prep):
# result = standard_prep(r)
current.s3db.req_customise_commit_fields()
if r.method in ("datalist", "datalist.dl"):
s3.filter = (r.table.cancel != True)
return True
s3.prep = custom_prep
# Disable postp
s3.postp = None
return attr
settings.customise_req_commit_controller = customise_req_commit_controller
# =============================================================================
# Modules
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = "Ticket Viewer",
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = "Translation Functionality",
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = None
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = "Messaging",
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
#("event", Storage(
# name_nice = "Disasters",
# #description = "Events",
# restricted = True,
# module_type = None
#)),
("req", Storage(
name_nice = "Requests",
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = None,
)),
#("project", Storage(
# name_nice = "Projects",
# restricted = True,
# module_type = None
#)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
#("vulnerability", Storage(
# name_nice = "Vulnerability",
# restricted = True,
# module_type = None
#)),
#("transport", Storage(
# name_nice = "Transport",
# restricted = True,
# module_type = None
#)),
#("hms", Storage(
# name_nice = "Hospitals",
# restricted = True,
# module_type = None
#)),
#("cr", Storage(
# name_nice = "Shelters",
# restricted = True,
# module_type = None
#)),
("supply", Storage(
name_nice = "Supply Chain Management",
restricted = True,
module_type = None
)),
])
|
gnarula/eden_deployment
|
private/templates/Philippines/config.py
|
Python
|
mit
| 128,063
|
[
"Amber"
] |
62f65749e38298b41b0e9361ae3b8f1eebe384588da89aa8d3b9f4287bdb0b90
|
import os, sys
import argparse
import types
import glob
import numpy as np
import pickle
import tensorflow as tf
import data
import model
from util import *
from learning import LearnerCls, LearnerDACls, LearnerClsSelf, LearnerConfPred
from learning import TempScalingCls as CalibratorCls
##TODO: clean-up tf options
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#gpus = tf.config.experimental.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(gpus[0], True)
def main(args):
# ## init a snapshot path
# os.makedirs(args.train.save_root, exist_ok=True)
# ## init logger
# sys.stdout = Logger(os.path.join(args.train.save_root, 'out'))
# ## print args
# print_args(args)
snap_list = glob.glob(args.snapshot_prefix + '_*')
print(snap_list)
print("# experiments = ", len(snap_list))
## init gpus
if not args.cpu:
print("##GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
print()
## init datasets
print("## init datasets")
ds_src = data.MultiSourceDataset(
args.data.src,
args.aug_params,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
)
assert(len(args.aug_params) == 1) ##TODO
ds_tar = getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params[0],
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1])
ds_dom = data.DomainDataset(
data.MultiSourceDataset(
args.data.src,
args.aug_params,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True, test_aug=True, # augment all splits
domain_id=1,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
),
getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params[0],
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True, test_aug=True, # augment all splits
domain_id=0,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1]))
##TODO: redundant
ds_src_init = data.MultiSourceDataset(
args.data.src,
args.aug_params_init,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],##TODO
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
)
assert(len(args.aug_params) == 1) ##TODO
ds_tar_init = getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params_init[0],##TODO
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1])
ds_dom_init = data.DomainDataset(
data.MultiSourceDataset(
args.data.src,
args.aug_params_init,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True, test_aug=True, # augment all splits
domain_id=1,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
),
getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params_init[0], ##TODO
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True, test_aug=True, # augment all splits
domain_id=0,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1]))
##TODO: redundant
ds_src_self = data.MultiSourceDataset(
args.data.src,
args.aug_params,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
domain_id=1,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0],
        resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
)
assert(len(args.aug_params) == 1) ##TODO
ds_tar_self = getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params[0],
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
domain_id=0,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1],
double_aug=True if args.training_type=='selfcon' else False,
)
print()
if args.merge_train_val:
ds_src.train = data.ChainLoader(ds_src.train, ds_src.val)
ds_dom.train = data.ChainLoader(ds_dom.train, ds_dom.val)
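        # Hedged note: judging from its use here, data.ChainLoader presumably
        # concatenates the two loaders, so the merged 'train' split iterates the
        # original train batches followed by the val batches.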
## collect stats
cls_error_init_list, cal_error_init_list = [], []
cls_error_list, cal_error_list = [], []
perf_epoch_list = []
for snap_root in snap_list:
##
## final student
##
## a student model
mdl_st_base = getattr(model, args.model.base)(num_class=args.model.n_labels, input_shape=args.model.img_size)
mdl_st = model.Student(args.model, mdl_st_base, ds_src_self, ds_tar_self, ideal=args.ideal)
## load the final student
mdl_st.model_base.load_weights(os.path.join(snap_root, 'model_params_final'))
## evaluate
learner = LearnerClsSelf(None, None, mdl_st, None)
error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar, verbose=True)
cls_error_list = np.append(cls_error_list, error.numpy())
cal_error_list = np.append(cal_error_list, ece)
print(f"[final, {args.snapshot_prefix}, cls error, n = {len(cls_error_list)}] mean = {np.mean(cls_error_list*100.0):.2f}%, std = {np.std(cls_error_list*100.0):.2f}%")
print(f"[final, {args.snapshot_prefix}, cal error, n = {len(cal_error_list)}] mean = {np.mean(cal_error_list*100.0):.2f}%, std = {np.std(cal_error_list*100.0):.2f}%")
##
## init student
##
## load the init student
mdl_fn_init = os.path.basename(glob.glob(os.path.join(snap_root, 'model_params_*init*.index'))[0])
mdl_fn_init = mdl_fn_init[:mdl_fn_init.rfind('_')]
if 'sourceonly' in mdl_fn_init:
mdl_st.model_base.load_weights(os.path.join(snap_root, mdl_fn_init+'_best'))
learner = LearnerClsSelf(None, None, mdl_st, None)
else:
assert('advtr' in mdl_fn_init)
            ## init an adv model
mdl_adv = getattr(model, args.train_advtr.model_advtr)(n_in=mdl_st.model_base.dim_feat)
mdl_st_adv = model.DAN(mdl_st.model_base, mdl_adv)
mdl_st_adv.load_weights(os.path.join(snap_root, mdl_fn_init+'_final'))
## init a learner
learner = LearnerDACls(None, mdl_st_adv)
## evaluate
error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar, verbose=True)
cls_error_init_list = np.append(cls_error_init_list, error.numpy())
cal_error_init_list = np.append(cal_error_init_list, ece)
print(f"[init, {args.snapshot_prefix}, cls error, n = {len(cls_error_init_list)}] mean = {np.mean(cls_error_init_list*100.0):.2f}%, std = {np.std(cls_error_init_list*100.0):.2f}%")
print(f"[init, {args.snapshot_prefix}, cal error, n = {len(cal_error_init_list)}] mean = {np.mean(cal_error_init_list*100.0):.2f}%, std = {np.std(cal_error_init_list*100.0):.2f}%")
##
## teacher performance at each step
##
if args.no_mid_results:
continue
cls_error_epoch_list, cal_error_epoch_list, prec_epoch_list, cov_epoch_list = [], [], [], []
for i_epoch in range(1, args.train.n_epochs): # ignore the last
## load
print("!!!! currently load best, but may load final later")
mdl_st.model_base.load_weights(os.path.join(snap_root, f'model_params_base_epoch_{i_epoch}_best'))
## cls/cal error
learner = LearnerClsSelf(None, None, mdl_st, None)
error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar, verbose=True)
print(error.numpy(), ece)
## precision/coverage
learner = LearnerConfPred(None, mdl_st.model_conf, mdl_st.model_base)
## set a constant
mdl_st.model_conf.T = tf.Variable(1.0 - args.train_conf.eps)
## test the model
prec, n_conf, n = learner.test(ds_tar.test, ld_name='tar', verbose=True)
cls_error_epoch_list.append(error.numpy())
cal_error_epoch_list.append(ece)
prec_epoch_list.append(prec.numpy())
cov_epoch_list.append(float(n_conf.numpy())/float(n))
perf_epoch_list.append({
'cls_error': np.array(cls_error_epoch_list),
'cal_error': np.array(cal_error_epoch_list),
'prec': np.array(prec_epoch_list),
'cov': np.array(cov_epoch_list)})
print()
## save
fn = args.snapshot_prefix + '.pk'
pickle.dump(
{
'cls_error_init': cls_error_init_list,
'cal_error_init': cal_error_init_list,
'cls_error': cls_error_list,
'cal_error': cal_error_list,
'perf_epoch': perf_epoch_list
},
open(fn, 'wb'))
def init_aug_params(aug, args):
aug_params = []
for a in aug:
if a == 'jitter':
aug_params.append([('jitter', {'brightness': 0.4, 'contrast': 0.4, 'saturation': 0.4})])
# elif a == 'shake':
# args.aug_params.append([('randaug', {'size': 32, 'mode': 'SHAKE'})])
elif a == 'svhnspec':
aug_params.append([
('intensity_flip', {}),
('intensity_scaling', {'min': -1.5, 'max': 1.5}),
('intensity_offset', {'min': -0.5, 'max': 0.5}),
('affine', {'std': 0.1}),
('translation', {'x_max': 2.0, 'y_max': 2.0}),
('gaussian', {'std': 0.1}),
])
elif a == 'translation':
aug_params.append([
('translation', {'x_max': 2.0, 'y_max': 2.0}),
])
elif a == 'randaug':
aug_params.append([('randaug', {'size': args.data.img_size[0]})])
else:
##TODO: simplify
aug_params.append(None)
return aug_params
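# Hedged usage sketch (argument values are made up): init_aug_params(['jitter', ''], args)
# would return [[('jitter', {'brightness': 0.4, 'contrast': 0.4, 'saturation': 0.4})], None],
# i.e. one augmentation pipeline per requested name, with None meaning "no augmentation".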
def parse_args():
    ## init a parser
parser = argparse.ArgumentParser(description='digit dataset training')
## meta args
parser.add_argument('--snapshot_prefix', type=str, required=True)
parser.add_argument('--no_mid_results', action='store_true')
#parser.add_argument('--exp_name', required=True, type=str, help='experiment name')
#parser.add_argument('--snapshot_root', default='snapshots', type=str, help='snapshot root name')
parser.add_argument('--cpu', action='store_true', help='use CPU')
parser.add_argument('--ideal', action='store_true', help='enable cheatkey')
    parser.add_argument('--merge_train_val', action='store_true', help='merge train and validation set')
    parser.add_argument('--training_type', type=str, default='selfcon', help='training type (selfcon, self, advtr, or srconly)')
## dataset args
parser.add_argument('--data.batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data.n_labels', default=10, type=int, help='the number of labels')
parser.add_argument('--data.src', type=str, nargs='*', default=['MNIST'], help='list of sources')
parser.add_argument('--data.tar', type=str, default='USPS', help='target')
parser.add_argument('--data.aug', type=str, nargs='*', default=[''], help='list of data augmentation')
parser.add_argument('--data.aug_init', type=str, nargs='*', default=[''], help='list of data augmentation')
parser.add_argument('--data.img_size', type=int, nargs=3, default=(32, 32, 3), help='image size')
parser.add_argument('--data.sample_ratio', type=float, nargs=2, default=[1.0, 1.0])
## model args
parser.add_argument('--model.base', default='ResNet18', type=str, help='model name')
parser.add_argument('--model.conf', default='ConfPred', type=str, help='model name')
parser.add_argument('--model.iw', default='BigFNN', type=str, help='model name')
# ## self-train args
# parser.add_argument('--train.rerun', action='store_true', help='find the best model')
# #parser.add_argument('--train.load_final', action='store_true', help='load the final model')
parser.add_argument('--train.n_epochs', type=int, default=50, help='the number of training iterations')
# parser.add_argument('--train.init_advtr', action='store_true', help='model initialization approach')
# parser.add_argument('--train.val_period', default=1, type=int, help='validation period in epochs')
# ## base model train args
# parser.add_argument('--train_base.rerun', action='store_true', help='find the best model')
# #parser.add_argument('--train_base.load_final', action='store_true', help='load the final model')
# parser.add_argument('--train_base.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--train_base.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--train_base.lr_step_size', default=5, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--train_base.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--train_base.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--train_base.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--train_base.n_epochs', default=25, type=int, help='the number of epochs')
# parser.add_argument('--train_base.val_period', default=1, type=int, help='validation period in epochs')
# ## iw train args
# parser.add_argument('--train_iw.rerun', action='store_true', help='find the best model')
# parser.add_argument('--train_iw.load_final', action='store_true', help='load the final model')
# parser.add_argument('--train_iw.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--train_iw.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--train_iw.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--train_iw.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--train_iw.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--train_iw.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--train_iw.n_epochs', default=100, type=int, help='the number of epochs')
# parser.add_argument('--train_iw.val_period', default=1, type=int, help='validation period in epochs')
# ## cal args
# parser.add_argument('--cal_iw.rerun', action='store_true', help='find the best model')
# parser.add_argument('--cal_iw.load_final', action='store_true', help='load the final model')
# parser.add_argument('--cal_iw.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--cal_iw.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--cal_iw.lr_step_size', default=50, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--cal_iw.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--cal_iw.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--cal_iw.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--cal_iw.n_epochs', default=500, type=int, help='the number of epochs')
# parser.add_argument('--cal_iw.val_period', default=1, type=int, help='validation period in epochs')
# ## train args
# parser.add_argument('--train_advtr.rerun', action='store_true', help='find the best model')
# #parser.add_argument('--train_advtr.load_final', action='store_true', help='load the final model')
# parser.add_argument('--train_advtr.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--train_advtr.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--train_advtr.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--train_advtr.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--train_advtr.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--train_advtr.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--train_advtr.n_epochs', default=100, type=int, help='the number of epochs')
# parser.add_argument('--train_advtr.val_period', default=1, type=int, help='validation period in epochs')
# parser.add_argument('--train_advtr.advtr_type', type=str, default='DANN', help='domain-adversarial training type')
parser.add_argument('--train_advtr.model_advtr', type=str, default='BigAdvFNN', help='adversarial network name')
# parser.add_argument('--train_advtr.reg_param_adv', type=float, default=1.0, help='adversarial loss regularization parameter')
# parser.add_argument('--train_advtr.no_adv_reg_schedule', action='store_true', help='do not schedule the adversarial loss regularization parameter')
# ## base model init train args
# parser.add_argument('--train_base_init.rerun', action='store_true', help='find the best model')
# parser.add_argument('--train_base_init.load_final', action='store_true', help='load the final model')
# parser.add_argument('--train_base_init.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--train_base_init.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--train_base_init.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--train_base_init.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--train_base_init.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--train_base_init.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--train_base_init.n_epochs', default=100, type=int, help='the number of epochs')
# parser.add_argument('--train_base_init.val_period', default=1, type=int, help='validation period in epochs')
# ## conf args
# #parser.add_argument('--train_conf.rerun', action='store_true', help='find the best model')
# #parser.add_argument('--train_conf.load_final', action='store_true', help='load the final model')
parser.add_argument('--train_conf.eps', type=float, default=0.01, help='epsilon')
args = parser.parse_args()
args = to_tree_namespace(args)
## duplicate
##TODO: better way?
# args.train.save_root = os.path.join(args.snapshot_root, args.exp_name)
# args.train_base.save_root = args.train.save_root
# args.train_iw.save_root = args.train.save_root
# args.cal_iw.save_root = args.train.save_root
# args.train_advtr.save_root = args.train.save_root
# args.train_base_init.save_root = args.train.save_root
# args.train_conf.save_root = args.train.save_root
args.model.n_labels = args.data.n_labels
args.model.img_size = args.data.img_size
# args.train_advtr.schedule_reg_param_adv = not args.train_advtr.no_adv_reg_schedule
# args.train_advtr.load_final = True
#args.train.load_final = True
#args.train_base.load_final = True
## init aug parameters
args.aug_params = init_aug_params(args.data.aug, args)
args.aug_params_init = init_aug_params(args.data.aug_init, args)
    # (augmentation parameter construction now lives in init_aug_params above)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
|
googleinterns/intern2020_cocal
|
uncertainty/plots/compute_stats.py
|
Python
|
apache-2.0
| 23,245
|
[
"Gaussian"
] |
0c2975d78e027b523d7a52c031c2c6b8a514b993741f9ab4ad92362357c52223
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import sys
import numpy as np
import pysam
import time
import os
from contextlib import closing
TAG_HAPLOTYPE = "ht"
TAG_CHUNK_ID = "mp"
PB_HAP1_READS = "pb_hap1"
PB_HAP2_READS = "pb_hap2"
PB_LENGTH = "pb_len"
PB_BLOCK_ID = "pb_block_id"
PB_START_POS = PB_BLOCK_ID
PB_END_POS = "pb_end_pos"
RD_ID = "read_id"
RD_ALN_START = "rd_aln_start"
RD_ALN_END = "rd_aln_end"
RD_PHASE_BLOCKS = "rd_chunk_pbs"
RD_HAPLOTYPE_TAG = "rd_haplotype_tag"
RPB_BLOCK_ID = "rpb_bid"
RPB_READ_START = "rpb_rs"
RPB_READ_LENGTH = "rpb_rl"
RPB_IS_HAP1 = "rpb_h1"
percent=lambda s, b: int(100.0 * s / b)
def parse_args(args = None):
parser = argparse.ArgumentParser("Merges phasing for two overlapping BAM/SAM files")
parser.add_argument('--chunkLeft', '-l', dest='chunkLeft', required=True, type=str,
help='Location of left chunk BAM/SAM')
parser.add_argument('--chunkRight', '-r', dest='chunkRight', required=True, type=str,
help='Location of right chunk BAM/SAM')
return parser.parse_args() if args is None else parser.parse_args(args)
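# Hedged CLI sketch (paths are placeholders):
#   python merge_chunks.py --chunkLeft left.chunk.bam --chunkRight right.chunk.bam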
def log(msg, depth=0):
print("\t" * depth + str(msg))
def merge_chunks__parse_chunk_id(chunk_id):
parts = chunk_id.split(",")
if len(parts) != 2 or len(parts[1].split("-")) != 2:
log("malformed chunk_id: {}".format(chunk_id))
return None, None, None
positions = parts[1].split("-")
chunk_idx, chunk_start, chunk_end = int(parts[0]), int(positions[0]), int(positions[1])
return chunk_idx, chunk_start, chunk_end
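# Hedged usage sketch (the chunk id is illustrative, not from real data):
#   merge_chunks__parse_chunk_id("3,1000000-2000000") -> (3, 1000000, 2000000)
# Malformed ids are logged and yield (None, None, None).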
def merge_chunks__parse_phase_info(phase_info):
parts = phase_info.split(",")
if len(parts) != 4 or not (parts[0].startswith('h') and parts[1].startswith('p') and parts[2].startswith('r')
and parts[3].startswith('l')):
log("malformed phase_info: {}".format(phase_info))
return None, None, None, None
    parts = list(map(lambda x: int(x[1:]), parts))
haplotype, phase_block, read_start, read_length = parts[0], parts[1], parts[2], parts[3]
return haplotype, phase_block, read_start, read_length
def merge_chunks__encode_phase_info(read_data):
haplotype = 0 if read_data[RPB_IS_HAP1] is None else (1 if read_data[RPB_IS_HAP1] else 2)
phase_block = read_data[RPB_BLOCK_ID]
read_start = read_data[RPB_READ_START]
read_length = read_data[RPB_READ_LENGTH]
return "h{},p{},r{},l{}".format(haplotype, phase_block, read_start, read_length)
def merge_chunks__save_read_info(all_reads, read):
# get info
read_id = read.query_name
align_start = read.reference_start
align_end = read.reference_end
# save storage data
read_data = dict()
all_reads[read_id] = read_data
read_data[RD_ID] = read_id
read_data[RD_ALN_START] = align_start
read_data[RD_ALN_END] = align_end
read_data[RD_HAPLOTYPE_TAG] = read.get_tag(TAG_HAPLOTYPE)
read_data[RD_PHASE_BLOCKS] = list()
# return data
return read_data
def merge_chunks__save_phase_block_info(phase_blocks, phase_info, read_id):
# get info
haplotype, phase_block, read_start, read_length = merge_chunks__parse_phase_info(phase_info)
if haplotype == 0:
return None
# get storage data
if phase_block in phase_blocks:
phase_block_data = phase_blocks[phase_block]
else:
phase_block_data = dict()
phase_blocks[phase_block] = phase_block_data
phase_block_data[PB_BLOCK_ID] = phase_block
phase_block_data[PB_HAP1_READS] = set()
phase_block_data[PB_HAP2_READS] = set()
phase_block_data[PB_END_POS] = None
phase_blocks[phase_block] = phase_block_data
# read pb info
read_phase_block_info = dict()
read_phase_block_info[RPB_BLOCK_ID] = phase_block
read_phase_block_info[RPB_READ_START] = read_start
read_phase_block_info[RPB_READ_LENGTH] = read_length
read_phase_block_info[RPB_IS_HAP1] = None
# save read
if haplotype == 1:
phase_block_data[PB_HAP1_READS].add(read_id)
read_phase_block_info[RPB_IS_HAP1] = True
elif haplotype == 2:
phase_block_data[PB_HAP2_READS].add(read_id)
read_phase_block_info[RPB_IS_HAP1] = False
else:
log("unknown haplotype in phase_info for read {}: {}".format(read_id, phase_info))
# return read phase data
return read_phase_block_info
def merge_chunks__read_chunk(chunk_location):
# log
log("{}:".format(chunk_location))
# data storage
phase_blocks = dict()
reads = dict()
read_count = 0
failed_reads = 0
with closing(pysam.AlignmentFile(chunk_location, 'rb' if chunk_location.endswith("bam") else 'r')) as aln:
log("reading", depth=1)
start = time.time()
for read in aln.fetch():
# get read data
read_id = read.query_name
read_count += 1
            # find haplotype tag; skip reads that are missing a required tag
            # (a bare 'continue' here would only advance the tag loop, not skip the read)
            missing_tag = False
            for tag in [TAG_HAPLOTYPE, TAG_CHUNK_ID]:
                if not read.has_tag(tag):
                    log("read {} had no {} tag".format(read_id, tag), depth=2)
                    failed_reads += 1
                    missing_tag = True
            if missing_tag:
                continue
# save read data
read_data = merge_chunks__save_read_info(reads, read)
            # save haplotype data
haplotype_tags = read.get_tag(TAG_HAPLOTYPE).split(";")
for pb_tag in haplotype_tags:
rpb_info = merge_chunks__save_phase_block_info(phase_blocks, pb_tag, read_id)
if rpb_info is not None: read_data[RD_PHASE_BLOCKS].append(rpb_info)
log("read {} reads ({}s)".format(read_count, int(time.time() - start)), depth=2)
# finish phase block analysis
phase_block_ids = list(phase_blocks.keys())
phase_block_ids.sort()
prev_pb = None
for pb_id in phase_block_ids:
curr_pb = phase_blocks[pb_id]
if prev_pb is not None: prev_pb[PB_END_POS] = curr_pb[PB_START_POS]
prev_pb = curr_pb
# we aren't going to use this last one anyway
prev_pb[PB_END_POS] = prev_pb[PB_START_POS]
# return chunk data
return reads, phase_blocks
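# Hedged note on the structures returned above: 'reads' maps read_id to a dict
# keyed by the RD_* constants, and 'phase_blocks' maps a block's start position
# to a dict keyed by the PB_* constants; both are plain dicts that the merge
# steps below mutate in place.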
def merge_chunks__create_new_phase_block_at_position(split_position, l_read, r_read):
    # get all documented haplotypes
l_haps = list(filter(lambda x: x[RPB_BLOCK_ID] < split_position, l_read[RD_PHASE_BLOCKS]))
r_haps = list(filter(lambda x: x[RPB_BLOCK_ID] >= split_position, r_read[RD_PHASE_BLOCKS]))
# data we want at the end
haps = list()
old_right_haplotype = None
# get desired (and modified) haplotypes from l_read
for hap in l_haps:
if hap[RPB_BLOCK_ID] >= split_position:
continue # belongs to r_read
elif hap[RPB_BLOCK_ID] + hap[RPB_READ_LENGTH] < split_position:
haps.append(hap) # before split
else: # this read needs to be split
new_hap = {
RPB_IS_HAP1: hap[RPB_IS_HAP1],
RPB_BLOCK_ID: hap[RPB_BLOCK_ID],
RPB_READ_START: hap[RPB_READ_START],
RPB_READ_LENGTH: split_position - hap[RPB_BLOCK_ID]
}
haps.append(new_hap)
# get desired (and modified) haplotypes from r_read
for hap in r_haps:
if hap[RPB_BLOCK_ID] >= split_position:
haps.append(hap) # after split
elif hap[RPB_BLOCK_ID] + hap[RPB_READ_LENGTH] < split_position:
continue # belongs to l_read
else: # this read needs to be split
split_diff = split_position - hap[RPB_BLOCK_ID]
new_hap = {
RPB_IS_HAP1: hap[RPB_IS_HAP1],
RPB_BLOCK_ID: split_position,
RPB_READ_START: hap[RPB_READ_START] + split_diff,
RPB_READ_LENGTH: hap[RPB_READ_LENGTH] - split_diff
}
haps.append(new_hap)
# sanity check and save old haplotype
if old_right_haplotype is not None: raise Exception("SANITY_CHECK_FAIL: " +
"found multiple phase_blocks ({}, {}) spanning split_position {} for read {}:".format(
old_right_haplotype, hap[RPB_BLOCK_ID], split_position, l_read[RD_ID]))
old_right_haplotype = hap[RPB_BLOCK_ID]
# sanity check
if old_right_haplotype is None: raise Exception(
"SANITY_CHECK_FAIL: found no phase_blocks spanning split_position {} for read {}:".format(
split_position, l_read[RD_ID]))
# save haploptyes
haps.sort(key=lambda x: x[RPB_BLOCK_ID])
new_hap_str = ";".join(map(merge_chunks__encode_phase_info, haps))
return new_hap_str, old_right_haplotype
def merge_chunks__organize_reads_and_blocks(l_reads, l_phase_blocks, r_reads, r_phase_blocks):
# get all phase blocks with same start pos
all_phase_blocks = list(set(l_phase_blocks.keys()).union(set(r_phase_blocks.keys())))
all_phase_block_count = len(all_phase_blocks)
# get all phase blocks with same start pos
shared_phase_blocks = list(set(l_phase_blocks.keys()).intersection(set(r_phase_blocks.keys())))
shared_phase_blocks.sort()
# get phase blocks by start and end pos
l_pb_uniq_ids = list(map(lambda x: "{}-{}".format(x[PB_START_POS], x[PB_END_POS]), l_phase_blocks.values()))
r_pb_uniq_ids = set(map(lambda x: "{}-{}".format(x[PB_START_POS], x[PB_END_POS]), r_phase_blocks.values()))
# find all matches
shared_phase_blocks = list()
perfect_matches = list()
inverted_matches = list()
for l_pb_uniq in l_pb_uniq_ids:
if l_pb_uniq in r_pb_uniq_ids:
# we know phase block positions align
shared_phase_blocks.append(l_pb_uniq)
shared_phase_block = int(l_pb_uniq.split("-")[0])
phase_block_median_pos = int((int(l_pb_uniq.split("-")[0]) + int(l_pb_uniq.split("-")[1]))/2.0)
# get all reads in phase block (get from both blocks, in case some were skipped)
reads_in_phase_block = set()
for reads in [l_phase_blocks[shared_phase_block][PB_HAP1_READS],l_phase_blocks[shared_phase_block][PB_HAP2_READS],
r_phase_blocks[shared_phase_block][PB_HAP1_READS],r_phase_blocks[shared_phase_block][PB_HAP2_READS]]:
for read in reads:
reads_in_phase_block.add(read)
            # get reads spanning the median position
reads_spanning_median = list(filter(
lambda x: l_reads[x][RD_ALN_START] <= phase_block_median_pos and
l_reads[x][RD_ALN_END] >= phase_block_median_pos,
reads_in_phase_block))
# perfect match?
perfect_matched_reads = list(filter(
lambda x: l_reads[x][RD_HAPLOTYPE_TAG] == r_reads[x][RD_HAPLOTYPE_TAG], reads_spanning_median
))
if len(perfect_matched_reads) == len(reads_spanning_median):
perfect_matches.append(l_pb_uniq)
continue
# inverted match?
inverted_matched_reads = list(filter(
lambda x: l_reads[x][RD_HAPLOTYPE_TAG].replace("h1","<TMP>").replace("h2","h1").replace("<TMP>","h2")
== r_reads[x][RD_HAPLOTYPE_TAG], reads_spanning_median
))
if len(inverted_matched_reads) == len(reads_spanning_median):
inverted_matches.append(l_pb_uniq)
continue
# loggit
log("Found {} distinct phase blocks".format(all_phase_block_count))
log("Found {} ({}%) perfect matches".format(
len(perfect_matches), percent(len(perfect_matches), all_phase_block_count)))
log("Found {} ({}%) inverted matches".format(
len(inverted_matches), percent(len(inverted_matches), all_phase_block_count)))
log("Found {} ({}%) matched phase starts".format(
len(shared_phase_blocks), percent(len(shared_phase_blocks), all_phase_block_count)))
# return what we found
return all_phase_blocks, perfect_matches, inverted_matches, shared_phase_blocks
def merge_chunks__recommend_merge_strategy(chunk_boundary, perfect_matches, inverted_matches, shared_phase_blocks):
# helper function
def pick_closest_elem(elems,center):
elems.sort(key=lambda x: abs(center - sum(map(int, str(x).split("-"))) / len(str(x).split("-"))))
return elems[0]
split_position, phase_block, invert_right, decision_summary = None, None, None, None
# case1: perfect match:
# READS: left of and spanning split_pos from left chunk, starting after split_pos from right chunk
# CALLS: left of split pos from left VCF, right of split pos from right VCF
if len(perfect_matches) > 0:
        parts = list(map(int, pick_closest_elem(perfect_matches, chunk_boundary).split("-")))
split_position = int(np.mean(parts))
phase_block = parts[0]
invert_right = False
decision_summary = "PERFECT_MATCH"
log("Found perfect match at pos {} in phase block {}".format(split_position, phase_block))
    # case2: perfect match but inverted haplotypes
# READS: left of and spanning split_pos from left chunk, starting after split_pos from right chunk
# reverse haplotype of included reads in phase_block from right chunk
# CALLS: left of split pos from left VCF, right of split pos from right VCF
# reverse phasing of calls in phase block from right chunk
elif len(inverted_matches) > 0:
        parts = list(map(int, pick_closest_elem(inverted_matches, chunk_boundary).split("-")))
split_position = int(np.mean(parts))
phase_block = parts[0]
invert_right = True
decision_summary = "INVERT_MATCH"
log("Found inverted match at pos {} in phase block {}".format(split_position, phase_block))
    # case3: found a phase block starting at the same position in each chunk
# READS: finishing before split_pos from left chunk, starting after split pos from right chunk
# reads spanning split_pos get hap info from left before split_pos, and hap info from right after and including
# CALLS: left of split pos from left VCF, right of split pos from right VCF
elif len(shared_phase_blocks) > 0:
        phase_block = pick_closest_elem(shared_phase_blocks, chunk_boundary)
split_position = phase_block
invert_right = False
decision_summary = "PHASE_START_MATCH"
log("Found phase block start match at {}".format(phase_block))
# case4: no matching phase blocks
# READS: finishing before split_pos from left chunk, reads spanning split_pos get phasing finishing left of
# split_pos from left chunk, phasing in phase blocks spanning split_pos get split in two, phasing in phase
# blocks starting after split_pos from right chunk
# CALLS: starting left of split_pos from left VCF, starting after split_pos from right VCF, calls from right
# phase block spanning split_pos get new phase_block
else:
phase_block = None
split_position = chunk_boundary
invert_right = False
decision_summary = "NO_MATCH"
log("Found no match, creating new phase block at {}".format(split_position))
# return data
return split_position, phase_block, invert_right, decision_summary
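# Hedged sketch of the decision order with made-up inputs:
#   merge_chunks__recommend_merge_strategy(1500000, ["1400000-1600000"], [], [])
# lets the perfect match win: split_position 1500000 (the mean of 1400000 and
# 1600000), phase_block 1400000, invert_right False, summary "PERFECT_MATCH".
# Inverted matches and shared phase starts are only consulted when no perfect
# match exists.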
def merge_chunks__specify_split_action(split_position, phase_block, invert_right,
l_reads, l_phase_blocks, r_reads, r_phase_blocks):
# describes read inclusion and modifications to haplotype string
left_reads_writing = dict()
right_reads_writing = dict()
right_phase_block_conversion = dict()
for read in l_reads.values():
# this read belongs wholly to the right chunk
if read[RD_ALN_START] > split_position:
continue
# this read belongs wholly to the left chunk
elif read[RD_ALN_END] <= split_position:
left_reads_writing[read[RD_ID]] = None
# this read spans the split_position (and needs analysis)
elif read[RD_ALN_START] <= split_position and read[RD_ALN_END] > split_position:
# case4: new phase block created at split pos
if phase_block is None:
l_read = read
r_read = r_reads[read[RD_ID]]
new_hap_str, old_right_haplotype = merge_chunks__create_new_phase_block_at_position(split_position,
l_read, r_read)
left_reads_writing[read[RD_ID]] = new_hap_str
right_phase_block_conversion[old_right_haplotype] = split_position
                # sanity check
if len(right_phase_block_conversion) > 1:
raise Exception("SANITY_CHECK_FAIL: got inconsistent phase blocks ({}) spanning {} for read {}"
.format(right_phase_block_conversion.keys(), split_position, read[RD_ID]))
# case3: take hap info before split_pos from left, after right. phase block exists at split_pos
elif phase_block == split_position:
l_read = l_reads[read[RD_ID]]
r_read = r_reads[read[RD_ID]]
haps = list(filter(lambda x: x[RPB_BLOCK_ID] < split_position, l_read[RD_PHASE_BLOCKS]))
haps.extend(list(filter(lambda x: x[RPB_BLOCK_ID] >= split_position, r_read[RD_PHASE_BLOCKS])))
haps.sort(key=lambda x: x[RPB_BLOCK_ID])
new_hap_str = ";".join(map(merge_chunks__encode_phase_info, haps))
left_reads_writing[read[RD_ID]] = new_hap_str
# case2, case1:
else:
left_reads_writing[read[RD_ID]] = True
# get right reads we care about (reads in the spanning-the-split_pos phase chunk)
    # (everything before split_pos comes from left chunk, everything after is unchanged)
analysis_read_ids = set()
if phase_block is None:
if len(right_phase_block_conversion) == 0:
log("No reads spanning {} were found!".format(split_position))
else:
analysis_phase_block_id = list(right_phase_block_conversion.keys())[0]
analysis_phase_block = r_phase_blocks[analysis_phase_block_id]
analysis_read_ids = analysis_phase_block[PB_HAP1_READS].union(analysis_phase_block[PB_HAP2_READS])
else:
analysis_phase_block = r_phase_blocks[phase_block]
analysis_read_ids = analysis_phase_block[PB_HAP1_READS].union(analysis_phase_block[PB_HAP2_READS])
for read_id in analysis_read_ids:
read = r_reads[read_id]
# this read belongs wholly to the left chunk
if read[RD_ALN_END] <= split_position:
continue
# this was analyzed with the left reads
elif read_id in left_reads_writing:
continue
        # now we need to analyze - we know these reads start after split_pos
# case4
if phase_block is None:
if len(right_phase_block_conversion) == 0:
raise Exception("SANITY_CHECK_FAIL: new phase block determined, but no conversion for read {}"
.format(read_id))
pb_from = list(right_phase_block_conversion.keys())[0]
pb_to = right_phase_block_conversion[pb_from]
new_hap_str = read[RD_HAPLOTYPE_TAG].replace("p{},".format(pb_from), "p{},".format(pb_to))
right_reads_writing[read_id] = new_hap_str
# case2
elif invert_right:
h1_str = "h1,p{}".format(phase_block)
h1_tmp = "h1,p<TMP>"
h2_str = "h2,p{}".format(phase_block)
new_hap_str = read[RD_HAPLOTYPE_TAG].replace(h1_str, h1_tmp).replace(h2_str, h1_str).replace(h1_tmp, h2_str)
right_reads_writing[read_id] = new_hap_str
# case1, case3
else:
pass
# summarize vcf
vcf_split_position = split_position
vcf_right_phase_action = None
if invert_right:
        vcf_right_phase_action = "INVERT"
elif phase_block is None:
if len(right_phase_block_conversion) != 0:
            vcf_right_phase_action = right_phase_block_conversion
else:
# no reads span this, so no action
pass
else:
# case1 or case 3: no action
pass
# finish
return left_reads_writing, right_reads_writing, vcf_split_position, vcf_right_phase_action
def merge_chunks__determine_chunk_splitting(args = None):
# get read and phase block info
l_reads, l_phase_blocks = merge_chunks__read_chunk(args.chunkLeft)
r_reads, r_phase_blocks = merge_chunks__read_chunk(args.chunkRight)
# organize chunk comparison
all_phase_blocks, perfect_matches, inverted_matches, shared_phase_blocks = merge_chunks__organize_reads_and_blocks(
l_reads, l_phase_blocks, r_reads, r_phase_blocks)
    # approximate the chunk boundary as the median phase-block start position
chunk_boundary = np.median(all_phase_blocks)
# recommend strategy
split_position, phase_block, invert_right, decision_summary = merge_chunks__recommend_merge_strategy(
chunk_boundary, perfect_matches, inverted_matches, shared_phase_blocks)
# implement strategy
left_reads_writing, right_reads_writing, vcf_split_pos, vcf_right_phase_action = merge_chunks__specify_split_action(
split_position, phase_block, invert_right, l_reads, l_phase_blocks, r_reads, r_phase_blocks)
# log summarization
log("read merge action: write {} from left, {} from right".format(len(left_reads_writing), len(right_reads_writing)))
log("call merge action: split at {}, right action {}".format(vcf_split_pos, vcf_right_phase_action))
"""
CASES
1) perfect split
split_position
phase_block != split_position
invert_right = False
take reads left of and spanning split_position from prev chunk
take reads starting after split_position from curr chunk
take calls based on split_position
no modification
ITERATION:
reads written from prev
2) inverted split
split_position
phase_block != split_position
invert_right = True
take reads left of and spanning split_position from prev chunk
take reads starting after split_position from curr chunk
take calls based on split_position
reverse haplotype string of phase_block from read in curr chunk
reverse phasing on calls in vcf
ITERATION:
reads written from prev
reads written with reversed phase block
3) shared phase start
split_position
phase_block = split_position
invert_right = False
take reads finishing left of split_position from prev chunk
take reads spanning start of split_position from prev and curr chunk,
keep haplotyping from prev chunk until split_pos
modify haplotyping to include phasing after split pos
take reads starting after phase from curr chunk
take calls based on split_position
ITERATION:
reads written from prev
reads written from curr
4) new phase block
split_position
phase_block = None
invert_right = False
take reads finishing left of split_position
take reads spanning split_position phase from prev and curr chunk,
keep haplotyping from prev chunk until split_pos
modify haplotyping to include phasing after split pos
ITERATION:
reads written from prev
reads written from curr
"""
if __name__ == "__main__":
# get our arguments
args = parse_args()
    assert all(map(os.path.isfile, [args.chunkLeft, args.chunkRight]))
    merge_chunks__determine_chunk_splitting(args)
|
benedictpaten/marginPhase
|
toil/src/toil_marginphase/scripts/merge_chunks.py
|
Python
|
mit
| 23,945
|
[
"pysam"
] |
a46af808076a34d4d0562b1456bfb538334b590eb27aadcc423f59033a548fdd
|
# -*- coding: utf-8 -*-
# TAMkin is a post-processing toolkit for normal mode analysis, thermochemistry
# and reaction kinetics.
# Copyright (C) 2008-2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, An Ghysels
# <An.Ghysels@UGent.be> and Matthias Vandichel <Matthias.Vandichel@UGent.be>
# Center for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all
# rights reserved unless otherwise stated.
#
# This file is part of TAMkin.
#
# TAMkin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "TAMkin: A Versatile Package for Vibrational Analysis and Chemical Kinetics",
# An Ghysels, Toon Verstraelen, Karen Hemelsoet, Michel Waroquier and Veronique
# Van Speybroeck, Journal of Chemical Information and Modeling, 2010, 50,
# 1736-1750
# http://dx.doi.org/10.1021/ci100099g
#
# TAMkin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import print_function, division
from tamkin.data import Molecule
from molmod.periodic import periodic
from molmod.unit_cells import UnitCell
from molmod.units import angstrom, amu
import numpy as np
__all__ = ["load_molecule_cp2k", 'load_fixed_cp2k']
def load_molecule_cp2k(fn_sp, fn_freq, multiplicity=1, is_periodic=True):
"""Load a molecule with the Hessian from a CP2K computation
Arguments:
| fn_sp -- The filename of the single point .out file containing the
energy and the forces.
| fn_freq -- The filename of the frequency .out file containing the
hessian
Optional arguments:
| multiplicity -- The spin multiplicity of the electronic system
[default=1]
| is_periodic -- True when the system is periodic in three dimensions.
         False when the system is aperiodic. [default=True]
       (The unit cell vectors for periodic structures are read from the
       frequency output file, not passed as an argument.)
"""
# auxiliary routine to read atoms
def atom_helper(f):
# skip some lines
for i in range(3):
f.readline()
# read the atom lines until an empty line is encountered
numbers = []
coordinates = []
masses = []
while True:
line = f.readline()
if len(line.strip()) == 0:
break
symbol = line[14:19].strip()[:2]
atom = periodic[symbol]
if atom is None:
symbol = symbol[:1]
atom = periodic[symbol]
if atom is None:
numbers.append(0)
else:
numbers.append(atom.number)
coordinates.append([float(line[22:33]), float(line[34:45]), float(line[46:57])])
masses.append(float(line[72:]))
numbers = np.array(numbers)
coordinates = np.array(coordinates)*angstrom
masses = np.array(masses)*amu
return numbers, coordinates, masses
# auxiliary routine to read forces
def force_helper(f, skip, offset):
# skip some lines
for i in range(skip):
f.readline()
# Read the actual forces
tmp = []
while True:
line = f.readline()
if line == "\n":
break
if line == "":
raise IOError("End of file while reading gradient (forces).")
words = line.split()
try:
tmp.append([float(words[offset]), float(words[offset+1]), float(words[offset+2])])
except ValueError:
break
return -np.array(tmp) # force to gradient
# go through the single point file: energy and gradient
energy = None
gradient = None
with open(fn_sp) as f:
while True:
line = f.readline()
if line == "":
break
if line.startswith(" ENERGY|"):
energy = float(line[58:])
elif line.startswith(" MODULE") and "ATOMIC COORDINATES" in line:
numbers, coordinates, masses = atom_helper(f)
elif line.startswith(" FORCES|"):
gradient = force_helper(f, 0, 1)
break
elif line.startswith(' ATOMIC FORCES in [a.u.]'):
gradient = force_helper(f, 2, 3)
break
if energy is None or gradient is None:
raise IOError("Could not read energy and/or gradient (forces) from single point file.")
# go through the freq file: lattice vectors and hessian
with open(fn_freq) as f:
vectors = np.zeros((3, 3), float)
for line in f:
if line.startswith(" CELL"):
break
for axis in range(3):
line = next(f)
vectors[:,axis] = np.array( [float(line[29:39]), float(line[39:49]), float(line[49:59])] )
unit_cell = UnitCell(vectors*angstrom)
free_indices = _load_free_low(f)
if len(free_indices) > 0:
total_size = coordinates.size
free_size = len(free_indices)
hessian = np.zeros((total_size, total_size), float)
i2 = 0
while i2 < free_size:
num_cols = min(5, free_size - i2)
next(f) # skip two lines
next(f)
for j in range(free_size):
line = next(f)
words = line.split()
for i1 in range(num_cols):
hessian[free_indices[i2 + i1], free_indices[j]] = \
float(words[i1 + 2])
i2 += num_cols
else:
raise IOError("Could not read hessian from freq file.")
# symmetrize
hessian = 0.5*(hessian+hessian.transpose())
# cp2k prints a transformed hessian, here we convert it back to the normal
# hessian in atomic units.
conv = 1e-3*np.array([masses, masses, masses]).transpose().ravel()**0.5
hessian *= conv
hessian *= conv.reshape((-1,1))
return Molecule(
numbers, coordinates, masses, energy, gradient,
hessian, multiplicity, 0, is_periodic, unit_cell=unit_cell
)
def _load_free_low(f):
'''Helper function to load the indices of the free atoms from a CP2K freq file
Parameters
----------
f: open file
An open file object with the CP2K freq output.
Returns
-------
free_indices: list of int
A list of integer indices of the free rows and columns of the Hessian matrix.
'''
free_indices = []
for line in f:
if line.startswith(" VIB| Vibrational Analysis Info"):
break
for line in f:
if line.startswith(" VIB| REPLICA Nr."):
words = line.split()
if words[-2] == '+':
free_index = 3*(int(words[-5])-1)
if words[-3] == 'Y':
free_index += 1
elif words[-3] == 'Z':
free_index += 2
free_indices.append(free_index)
if line.startswith(" VIB| Hessian in cartesian coordinates"):
break
return free_indices
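# Worked example of the index encoding above (illustrative): for atom number 5
# (1-based) in the VIB table, the X, Y and Z components map to Hessian rows
#   3*(5-1)+0 == 12,  3*(5-1)+1 == 13,  3*(5-1)+2 == 14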
def load_fixed_cp2k(fn_freq):
'''Load the fixed atoms from a CP2K freq output
Parameters
----------
fn_freq: str
The filename of the CP2K freq output
Returns
-------
fixed: numpy integer array
The indices of the fixed atoms, i.e. the atoms that are not free,
as determined from the VIB section of the output.
'''
with open(fn_freq) as f:
natom = None
for line in f:
if line.startswith(' - Atoms: '):
natom = int(line.split()[-1])
break
free_indices = _load_free_low(f)
if natom is None:
raise IOError('Could not read number of atoms from CP2K output.')
if len(free_indices) == 0:
raise IOError('Could not find the free atoms.')
free_atoms = np.array(free_indices[::3]) // 3
return np.array([i for i in range(natom) if i not in free_atoms])
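# Example usage (hypothetical file names; attribute names follow the Molecule
# construction above):
#
#   mol = load_molecule_cp2k('sp.out', 'freq.out', multiplicity=1, is_periodic=True)
#   fixed = load_fixed_cp2k('freq.out')  # indices of the fixed atoms
#   print(mol.energy, mol.hessian.shape, fixed)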
|
molmod/tamkin
|
tamkin/io/cp2k.py
|
Python
|
gpl-3.0
| 8,743
|
[
"CP2K"
] |
70e5c2b9b77c83b76c5de5a3d10e16666f36af9ec1d0b5ceb62b7fce499790ce
|
"""
Django module container for classes and operations related to the "Course Module" content type
"""
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from lazy import lazy
from xmodule import course_metadata_utils
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from xmodule.exceptions import UndefinedContext
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.graders import grader_from_conf
from xmodule.tabs import CourseTabList
from xmodule.mixin import LicenseMixin
import json
from xblock.core import XBlock
from xblock.fields import Scope, List, String, Dict, Boolean, Integer, Float
from .fields import Date
from django.utils.timezone import UTC
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
CATALOG_VISIBILITY_CATALOG_AND_ABOUT = "both"
CATALOG_VISIBILITY_ABOUT = "about"
CATALOG_VISIBILITY_NONE = "none"
class StringOrDate(Date):
def from_json(self, value):
"""
Parse an optional metadata key containing a time or a string:
if present, assume it's a string if it doesn't parse.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
except:
return value
if result is None:
return value
else:
return result
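# Fallback behavior in practice (illustrative values):
#   StringOrDate().from_json("2013-01-23")   -> parsed date value
#   StringOrDate().from_json("no due date")  -> "no due date" (string kept as-is)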
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
@lazy
def start_page(self):
return int(self.table_of_contents[0].attrib['page'])
@lazy
def end_page(self):
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
return int(last_el.attrib['page'])
@lazy
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
age = datetime.now(UTC) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
except Exception as err:
pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s", toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
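# Cache layout assumed by the lookup above (illustrative):
#   _cached_toc maps toc_url -> (parsed_toc, fetch_timestamp);
# entries older than 600 seconds are treated as stale and refetched.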
def __eq__(self, other):
return (self.title == other.title and
self.book_url == other.book_url)
def __ne__(self, other):
return not self == other
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
except:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
class CourseFields(object):
lti_passports = List(
display_name=_("LTI Passports"),
help=_('Enter the passports for course LTI tools in the following format: "id:client_key:client_secret".'),
scope=Scope.settings
)
textbooks = TextbookList(
help=_("List of pairs of (title, url) for textbooks used in this course"),
default=[],
scope=Scope.content
)
wiki_slug = String(help=_("Slug that points to the wiki for this course"), scope=Scope.content)
enrollment_start = Date(help=_("Date that enrollment for this class is opened"), scope=Scope.settings)
enrollment_end = Date(help=_("Date that enrollment for this class is closed"), scope=Scope.settings)
start = Date(
help=_("Start time when this module is visible"),
default=DEFAULT_START_DATE,
scope=Scope.settings
)
end = Date(help=_("Date that this class ends"), scope=Scope.settings)
cosmetic_display_price = Integer(
display_name=_("Cosmetic Course Display Price"),
help=_(
"The cost displayed to students for enrolling in the course. If a paid course registration price is "
"set by an administrator in the database, that price will be displayed instead of this one."
),
default=0,
scope=Scope.settings,
)
advertised_start = String(
display_name=_("Course Advertised Start Date"),
help=_(
"Enter the date you want to advertise as the course start date, if this date is different from the set "
"start date. To advertise the set start date, enter null."
),
scope=Scope.settings
)
pre_requisite_courses = List(
display_name=_("Pre-Requisite Courses"),
help=_("Pre-Requisite Course key if this course has a pre-requisite course"),
scope=Scope.settings
)
grading_policy = Dict(
help=_("Grading policy definition for this class"),
default={
"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15,
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15,
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3,
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4,
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5,
},
},
scope=Scope.content
)
show_calculator = Boolean(
display_name=_("Show Calculator"),
help=_("Enter true or false. When true, students can see the calculator in the course."),
default=False,
scope=Scope.settings
)
display_name = String(
help=_("Enter the name of the course as it should appear in the edX.org course list."),
default="Empty",
display_name=_("Course Display Name"),
scope=Scope.settings
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_('Enter the method by which this course is edited ("XML" or "Studio").'),
default="Studio",
scope=Scope.settings,
deprecated=True # Deprecated because someone would not edit this value within Studio.
)
show_chat = Boolean(
display_name=_("Show Chat Widget"),
help=_("Enter true or false. When true, students can see the chat widget in the course."),
default=False,
scope=Scope.settings
)
tabs = CourseTabList(help="List of tabs to enable in this course", scope=Scope.settings, default=[])
end_of_course_survey_url = String(
display_name=_("Course Survey URL"),
help=_("Enter the URL for the end-of-course survey. If your course does not have a survey, enter null."),
scope=Scope.settings
)
discussion_blackouts = List(
display_name=_("Discussion Blackout Dates"),
help=_(
'Enter pairs of dates between which students cannot post to discussion forums. Inside the provided '
'brackets, enter an additional set of square brackets surrounding each pair of dates you add. '
'Format each pair of dates as ["YYYY-MM-DD", "YYYY-MM-DD"]. To specify times as well as dates, '
'format each pair as ["YYYY-MM-DDTHH:MM", "YYYY-MM-DDTHH:MM"]. Be sure to include the "T" between '
'the date and time. For example, an entry defining two blackout periods looks like this, including '
'the outer pair of square brackets: [["2015-09-15", "2015-09-21"], ["2015-10-01", "2015-10-08"]] '
),
scope=Scope.settings
)
discussion_topics = Dict(
display_name=_("Discussion Topic Mapping"),
help=_(
'Enter discussion categories in the following format: "CategoryName": '
'{"id": "i4x-InstitutionName-CourseNumber-course-CourseRun"}. For example, one discussion '
'category may be "Lydian Mode": {"id": "i4x-UniversityX-MUS101-course-2015_T1"}. The "id" '
'value for each category must be unique. In "id" values, the only special characters that are '
'supported are underscore, hyphen, and period.'
),
scope=Scope.settings
)
discussion_sort_alpha = Boolean(
display_name=_("Discussion Sorting Alphabetical"),
scope=Scope.settings, default=False,
help=_(
"Enter true or false. If true, discussion categories and subcategories are sorted alphabetically. "
"If false, they are sorted chronologically."
)
)
announcement = Date(
display_name=_("Course Announcement Date"),
help=_("Enter the date to announce your course."),
scope=Scope.settings
)
cohort_config = Dict(
display_name=_("Cohort Configuration"),
help=_(
"Enter policy keys and values to enable the cohort feature, define automated student assignment to "
"groups, or identify any course-wide discussion topics as private to cohort members."
),
scope=Scope.settings
)
is_new = Boolean(
display_name=_("Course Is New"),
help=_(
"Enter true or false. If true, the course appears in the list of new courses on edx.org, and a New! "
"badge temporarily appears next to the course image."
),
scope=Scope.settings
)
mobile_available = Boolean(
display_name=_("Mobile Course Available"),
help=_("Enter true or false. If true, the course will be available to mobile devices."),
default=False,
scope=Scope.settings
)
video_upload_pipeline = Dict(
display_name=_("Video Upload Credentials"),
help=_("Enter the unique identifier for your course's video files provided by edX."),
scope=Scope.settings
)
facebook_url = String(
help=_(
"Enter the URL for the official course Facebook group. "
"If you provide a URL, the mobile app includes a button that students can tap to access the group."
),
default=None,
display_name=_("Facebook URL"),
scope=Scope.settings
)
no_grade = Boolean(
display_name=_("Course Not Graded"),
help=_("Enter true or false. If true, the course will not be graded."),
default=False,
scope=Scope.settings
)
disable_progress_graph = Boolean(
display_name=_("Disable Progress Graph"),
help=_("Enter true or false. If true, students cannot view the progress graph."),
default=False,
scope=Scope.settings
)
pdf_textbooks = List(
display_name=_("PDF Textbooks"),
help=_("List of dictionaries containing pdf_textbook configuration"), scope=Scope.settings
)
html_textbooks = List(
display_name=_("HTML Textbooks"),
help=_(
"For HTML textbooks that appear as separate tabs in the courseware, enter the name of the tab (usually "
"the name of the book) as well as the URLs and titles of all the chapters in the book."
),
scope=Scope.settings
)
remote_gradebook = Dict(
display_name=_("Remote Gradebook"),
help=_(
"Enter the remote gradebook mapping. Only use this setting when "
"REMOTE_GRADEBOOK_URL has been specified."
),
scope=Scope.settings
)
enable_ccx = Boolean(
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
display_name=_("Enable CCX"),
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
help=_(
"Allow course instructors to assign CCX Coach roles, and allow coaches to manage Custom Courses on edX."
" When false, Custom Courses cannot be created, but existing Custom Courses will be preserved."
),
default=False,
scope=Scope.settings
)
allow_anonymous = Boolean(
display_name=_("Allow Anonymous Discussion Posts"),
help=_("Enter true or false. If true, students can create discussion posts that are anonymous to all users."),
scope=Scope.settings, default=True
)
allow_anonymous_to_peers = Boolean(
display_name=_("Allow Anonymous Discussion Posts to Peers"),
help=_(
"Enter true or false. If true, students can create discussion posts that are anonymous to other "
"students. This setting does not make posts anonymous to course staff."
),
scope=Scope.settings, default=False
)
advanced_modules = List(
display_name=_("Advanced Module List"),
help=_("Enter the names of the advanced components to use in your course."),
scope=Scope.settings
)
has_children = True
checklists = List(
scope=Scope.settings,
default=[
{
"short_description": _("Getting Started With Studio"),
"items": [
{
"short_description": _("Add Course Team Members"),
"long_description": _(
"Grant your collaborators permission to edit your course so you can work together."
),
"is_checked": False,
"action_url": "ManageUsers",
"action_text": _("Edit Course Team"),
"action_external": False,
},
{
"short_description": _("Set Important Dates for Your Course"),
"long_description": _(
"Establish your course's student enrollment and launch dates on the Schedule and Details "
"page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Details & Schedule"),
"action_external": False,
},
{
"short_description": _("Draft Your Course's Grading Policy"),
"long_description": _(
"Set up your assignment types and grading policy even if you haven't created all your "
"assignments."
),
"is_checked": False,
"action_url": "SettingsGrading",
"action_text": _("Edit Grading Settings"),
"action_external": False,
},
{
"short_description": _("Explore the Other Studio Checklists"),
"long_description": _(
"Discover other available course authoring tools, and find help when you need it."
),
"is_checked": False,
"action_url": "",
"action_text": "",
"action_external": False,
},
],
},
{
"short_description": _("Draft a Rough Course Outline"),
"items": [
{
"short_description": _("Create Your First Section and Subsection"),
"long_description": _("Use your course outline to build your first Section and Subsection."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Set Section Release Dates"),
"long_description": _(
"Specify the release dates for each Section in your course. Sections become visible to "
"students on their release dates."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Designate a Subsection as Graded"),
"long_description": _(
"Set a Subsection to be graded as a specific assignment type. Assignments within graded "
"Subsections count toward a student's final grade."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Reordering Course Content"),
"long_description": _("Use drag and drop to reorder the content in your course."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Renaming Sections"),
"long_description": _("Rename Sections by clicking the Section name from the Course Outline."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Deleting Course Content"),
"long_description": _(
"Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is "
"no Undo function."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Add an Instructor-Only Section to Your Outline"),
"long_description": _(
"Some course authors find using a section for unsorted, in-progress work useful. To do "
"this, create a section and set the release date to the distant future."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
],
},
{
"short_description": _("Explore edX's Support Tools"),
"items": [
{
"short_description": _("Explore the Studio Help Forum"),
"long_description": _(
"Access the Studio Help forum from the menu that appears when you click your user name "
"in the top right corner of Studio."
),
"is_checked": False,
"action_url": "http://help.edge.edx.org/",
"action_text": _("Visit Studio Help"),
"action_external": True,
},
{
"short_description": _("Enroll in edX 101"),
"long_description": _("Register for edX 101, edX's primer for course creation."),
"is_checked": False,
"action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
"action_text": _("Register for edX 101"),
"action_external": True,
},
{
"short_description": _("Download the Studio Documentation"),
"long_description": _("Download the searchable Studio reference documentation in PDF form."),
"is_checked": False,
"action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
"action_text": _("Download Documentation"),
"action_external": True,
},
],
},
{
"short_description": _("Draft Your Course About Page"),
"items": [
{
"short_description": _("Draft a Course Description"),
"long_description": _(
"Courses on edX have an About page that includes a course video, description, and more. "
"Draft the text students will read before deciding to enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Staff Bios"),
"long_description": _(
"Showing prospective students who their instructor will be is helpful. "
"Include staff bios on the course About page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course FAQs"),
"long_description": _("Include a short list of frequently asked questions about your course."),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course Prerequisites"),
"long_description": _(
"Let students know what knowledge and/or skills they should have before "
"they enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
],
},
],
)
info_sidebar_name = String(
display_name=_("Course Info Sidebar Name"),
help=_(
"Enter the heading that you want students to see above your course handouts on the Course Info page. "
"Your course handouts appear in the right panel of the page."
),
scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(
help=_(
"True if timezones should be shown on dates in the courseware. "
"Deprecated in favor of due_date_display_format."
),
scope=Scope.settings, default=True
)
due_date_display_format = String(
display_name=_("Due Date Display Format"),
help=_(
"Enter the format for due dates. The default is Mon DD, YYYY. Enter \"%m-%d-%Y\" for MM-DD-YYYY, "
"\"%d-%m-%Y\" for DD-MM-YYYY, \"%Y-%m-%d\" for YYYY-MM-DD, or \"%Y-%d-%m\" for YYYY-DD-MM."
),
scope=Scope.settings, default=None
)
enrollment_domain = String(
display_name=_("External Login Domain"),
help=_("Enter the external login method students can use for the course."),
scope=Scope.settings
)
certificates_show_before_end = Boolean(
display_name=_("Certificates Downloadable Before End"),
help=_(
"Enter true or false. If true, students can download certificates before the course ends, if they've "
"met certificate requirements."
),
scope=Scope.settings,
default=False,
deprecated=True
)
certificates_display_behavior = String(
display_name=_("Certificates Display Behavior"),
help=_(
"Enter end, early_with_info, or early_no_info. After certificate generation, students who passed see a "
"link to their certificates on the dashboard and students who did not pass see information about the "
"grading configuration. The default is end, which displays this certificate information to all students "
"after the course end date. To display this certificate information to all students as soon as "
"certificates are generated, enter early_with_info. To display only the links to passing students as "
"soon as certificates are generated, enter early_no_info."
),
scope=Scope.settings,
default="end"
)
course_image = String(
display_name=_("Course About Page Image"),
help=_(
"Edit the name of the course image file. You must upload this file on the Files & Uploads page. "
"You can also set the course image on the Settings & Details page."
),
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
issue_badges = Boolean(
display_name=_("Issue Open Badges"),
help=_(
"Issue Open Badges badges for this course. Badges are generated when certificates are created."
),
scope=Scope.settings,
default=True
)
## Course level Certificate Name overrides.
cert_name_short = String(
help=_(
"Use this setting only when generating PDF certificates. "
"Between quotation marks, enter the short name of the course to use on the certificate that "
"students receive when they complete the course."
),
display_name=_("Certificate Name (Short)"),
scope=Scope.settings,
default=""
)
cert_name_long = String(
help=_(
"Use this setting only when generating PDF certificates. "
"Between quotation marks, enter the long name of the course to use on the certificate that students "
"receive when they complete the course."
),
display_name=_("Certificate Name (Long)"),
scope=Scope.settings,
default=""
)
cert_html_view_enabled = Boolean(
display_name=_("Certificate Web/HTML View Enabled"),
help=_("If true, certificate Web/HTML views are enabled for the course."),
scope=Scope.settings,
default=False,
)
cert_html_view_overrides = Dict(
# Translators: This field is the container for course-specific certificate configuration values
display_name=_("Certificate Web/HTML View Overrides"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific overrides for the Web/HTML template parameters here (JSON format)"),
scope=Scope.settings,
)
# Specific certificate information managed via Studio (should eventually fold other cert settings into this)
certificates = Dict(
# Translators: This field is the container for course-specific certificate configuration values
display_name=_("Certificate Configuration"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific configuration information here (JSON format)"),
scope=Scope.settings,
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(
display_name=_("CSS Class for Course Reruns"),
help=_("Allows courses to share the same css class across runs even if they have different numbers."),
scope=Scope.settings, default="",
deprecated=True
)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(
display_name=_("Discussion Forum External Link"),
help=_("Allows specification of an external link to replace discussion forums."),
scope=Scope.settings,
deprecated=True
)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(
display_name=_("Hide Progress Tab"),
help=_("Allows hiding of the progress tab."),
scope=Scope.settings,
deprecated=True
)
display_organization = String(
display_name=_("Course Organization Display String"),
help=_(
"Enter the course organization that you want to appear in the courseware. This setting overrides the "
"organization that you entered when you created the course. To use the organization that you entered "
"when you created the course, enter null."
),
scope=Scope.settings
)
display_coursenumber = String(
display_name=_("Course Number Display String"),
help=_(
"Enter the course number that you want to appear in the courseware. This setting overrides the course "
"number that you entered when you created the course. To use the course number that you entered when "
"you created the course, enter null."
),
scope=Scope.settings
)
max_student_enrollments_allowed = Integer(
display_name=_("Course Maximum Student Enrollment"),
help=_(
"Enter the maximum number of students that can enroll in the course. To allow an unlimited number of "
"students, enter null."
),
scope=Scope.settings
)
allow_public_wiki_access = Boolean(
display_name=_("Allow Public Wiki Access"),
help=_(
"Enter true or false. If true, edX users can view the course wiki even "
"if they're not enrolled in the course."
),
default=False,
scope=Scope.settings
)
invitation_only = Boolean(
display_name=_("Invitation Only"),
help=_("Whether to restrict enrollment to invitation by the course staff."),
default=False,
scope=Scope.settings
)
course_survey_name = String(
display_name=_("Pre-Course Survey Name"),
help=_("Name of SurveyForm to display as a pre-course survey to the user."),
default=None,
scope=Scope.settings,
deprecated=True
)
course_survey_required = Boolean(
display_name=_("Pre-Course Survey Required"),
help=_(
"Specify whether students must complete a survey before they can view your course content. If you "
"set this value to true, you must add a name for the survey to the Course Survey Name setting above."
),
default=False,
scope=Scope.settings,
deprecated=True
)
catalog_visibility = String(
display_name=_("Course Visibility In Catalog"),
help=_(
"Defines the access permissions for showing the course in the course catalog. This can be set to one "
"of three values: 'both' (show in catalog and allow access to about page), 'about' (only allow access "
"to about page), 'none' (do not show in catalog and do not allow access to an about page)."
),
default=CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
scope=Scope.settings,
values=[
{"display_name": _("Both"), "value": CATALOG_VISIBILITY_CATALOG_AND_ABOUT},
{"display_name": _("About"), "value": CATALOG_VISIBILITY_ABOUT},
{"display_name": _("None"), "value": CATALOG_VISIBILITY_NONE}]
)
entrance_exam_enabled = Boolean(
display_name=_("Entrance Exam Enabled"),
help=_(
"Specify whether students must complete an entrance exam before they can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
entrance_exam_minimum_score_pct = Float(
display_name=_("Entrance Exam Minimum Score (%)"),
help=_(
"Specify a minimum percentage score for an entrance exam before students can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=65,
scope=Scope.settings,
)
entrance_exam_id = String(
display_name=_("Entrance Exam ID"),
help=_("Content module identifier (location) of entrance exam."),
default=None,
scope=Scope.settings,
)
social_sharing_url = String(
display_name=_("Social Media Sharing URL"),
help=_(
"If dashboard social sharing and custom course URLs are enabled, you can provide a URL "
"(such as the URL to a course About page) that social media sites can link to. URLs must "
"be fully qualified. For example: http://www.edx.org/course/Introduction-to-MOOCs-ITM001"
),
default=None,
scope=Scope.settings,
)
language = String(
display_name=_("Course Language"),
help=_("Specify the language of your course."),
default=None,
scope=Scope.settings
)
teams_configuration = Dict(
display_name=_("Teams Configuration"),
help=_(
"Enter configuration for the teams feature. Expects two entries: max_team_size and topics, where "
"topics is a list of topics."
),
scope=Scope.settings
)
enable_proctored_exams = Boolean(
display_name=_("Enable Proctored Exams"),
help=_(
"Enter true or false. If this value is true, timed and proctored exams are enabled in your course."
),
default=False,
scope=Scope.settings
)
minimum_grade_credit = Float(
display_name=_("Minimum Grade for Credit"),
help=_(
"The minimum grade that a learner must earn to receive credit in the course, "
"as a decimal between 0.0 and 1.0. For example, for 75%, enter 0.75."
),
default=0.8,
scope=Scope.settings,
)
class CourseModule(CourseFields, SequenceModule): # pylint: disable=abstract-method
"""
The CourseDescriptor needs its module_class to be a SequenceModule, but some code that
expects a CourseDescriptor to have all its fields can fail if it gets a SequenceModule instead.
This class is to make sure that all the fields are present in all cases.
"""
class CourseDescriptor(CourseFields, SequenceDescriptor, LicenseMixin):
"""
The descriptor for the course XModule
"""
module_class = CourseModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
_ = self.runtime.service(self, "i18n").ugettext
if self.wiki_slug is None:
self.wiki_slug = self.location.course
if self.due_date_display_format is None and self.show_timezone is False:
# For existing courses with show_timezone set to False (and no due_date_display_format specified),
# set the due_date_display_format to what would have been shown previously (with no timezone).
# Then remove show_timezone so that if the user clears out the due_date_display_format,
# they get the default date display.
self.due_date_display_format = "DATE_TIME"
delattr(self, 'show_timezone')
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
if not getattr(self, "tabs", []):
CourseTabList.initialize_default(self)
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
# BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
# defective grading policy values (but not None)
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
# Use setters so that side effecting to .definitions works
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
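# Example course_policy (illustrative): overriding only GRADE_CUTOFFS leaves
# the GRADER list at the CourseFields default.
#   course.set_grading_policy({'GRADE_CUTOFFS': {'A': 0.9, 'B': 0.7, 'Pass': 0.5}})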
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except IOError:
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, id_generator):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
# bleh, have to parse the XML here to just pull out the url_name attribute
# I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
# load license if it exists
definition = LicenseMixin.parse_license_from_xml(definition, xml_object)
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
if len(self.textbooks) > 0:
textbook_xml_object = etree.Element('textbook')
for textbook in self.textbooks:
textbook_xml_object.set('title', textbook.title)
textbook_xml_object.set('book_url', textbook.book_url)
xml_object.append(textbook_xml_object)
if self.wiki_slug is not None:
wiki_xml_object = etree.Element('wiki')
wiki_xml_object.set('slug', self.wiki_slug)
xml_object.append(wiki_xml_object)
# handle license specifically. Default the course to have a license
# of "All Rights Reserved", if a license is not explicitly set.
self.add_license_to_xml(xml_object, default="all-rights-reserved")
return xml_object
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
return course_metadata_utils.has_course_ended(self.end)
def may_certify(self):
"""
Return whether it is acceptable to show the student a certificate download link.
"""
return course_metadata_utils.may_certify_for_course(
self.certificates_display_behavior,
self.certificates_show_before_end,
self.has_ended()
)
def has_started(self):
return course_metadata_utils.has_course_started(self.start)
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
# force the caching of the xblock value so that it can detect the change
# pylint: disable=pointless-statement
self.grading_policy['GRADER']
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def always_cohort_inline_discussions(self):
"""
This allows changing the default cohorting behavior of inline discussions. By
setting this to False, all inline discussions are non-cohorted unless their
ids are specified in cohorted_discussions.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return True
return bool(config.get("always_cohort_inline_discussions", True))
@property
def is_newish(self):
"""
Returns whether the course has been flagged as new. If there is no flag,
returns a heuristic value based on the announcement and start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = self._sorting_dates()
if announcement and (now - announcement).days < 30:
# The course has been announced for less than a month
return True
elif (now - start).days < 1:
# The course has not started yet
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
Returns a score that can be used to sort the courses according
to how "new" they are. The "newness" score is computed using a
heuristic that takes into account the announcement and
(advertised) start dates of the course, if available.
The lower the number the "newer" the course.
"""
# Courses that have an announcement date get a lower score than
# courses that don't; older courses should have a higher
# score.
announcement, start, now = self._sorting_dates()
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
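# Worked example (approximate): announced 10 days ago gives
# score = -exp(-10/300) ~= -0.97; no announcement and started 30 days ago
# gives score = exp(30/300) ~= 1.11, so the announced course sorts as newer.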
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
announcement = self.announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
@lazy
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
affect grading a student. This is used to efficiently fetch
all the xmodule state for a FieldDataCache without walking
the descriptor tree again.
"""
# If this descriptor has been bound to a student, return the corresponding
# XModule. If not, just use the descriptor itself
try:
module = getattr(self, '_xmodule', None)
if not module:
module = self
except UndefinedContext:
module = self
def possibly_scored(usage_key):
"""Can this XBlock type can have a score or children?"""
return usage_key.block_type in self.block_types_affecting_grading
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children(usage_key_filter=possibly_scored):
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
for chapter in self.get_children():
for section in chapter.get_children():
if section.graded:
xmoduledescriptors = list(yield_descriptor_descendents(section))
xmoduledescriptors.append(section)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {
'section_descriptor': section,
'xmoduledescriptors': [child for child in xmoduledescriptors if child.has_score]
}
section_format = section.format if section.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(section)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
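# Shape of the returned value (illustrative):
#   {'graded_sections': {'Homework': [{'section_descriptor': <section>,
#                                      'xmoduledescriptors': [<scored children>]}, ...]},
#    'all_descriptors': [<descriptor>, ...]}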
@lazy
def block_types_affecting_grading(self):
"""Return all block types that could impact grading (i.e. scored, or having children)."""
return frozenset(
cat for (cat, xblock_class) in XBlock.load_classes() if (
getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False)
)
)
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location.course_key
def start_datetime_text(self, format_string="SHORT_DATE"):
"""
Returns the desired text corresponding to the course's start date and time in UTC. Prefers .advertised_start,
then falls back to .start
"""
i18n = self.runtime.service(self, "i18n")
return course_metadata_utils.course_start_datetime_text(
self.start,
self.advertised_start,
format_string,
i18n.ugettext,
i18n.strftime
)
@property
def start_date_is_still_default(self):
"""
Checks if the start date set for the course is still default, i.e. .start has not been modified,
and .advertised_start has not been set.
"""
return course_metadata_utils.course_start_date_is_default(
self.start,
self.advertised_start
)
def end_datetime_text(self, format_string="SHORT_DATE"):
"""
Returns the end date or date_time for the course formatted as a string.
"""
return course_metadata_utils.course_end_datetime_text(
self.end,
format_string,
self.runtime.service(self, "i18n").strftime
)
def get_discussion_blackout_datetimes(self):
"""
Get a list of dicts with start and end fields with datetime values from
the discussion_blackouts setting
"""
date_proxy = Date()
try:
ret = [
{"start": date_proxy.from_json(start), "end": date_proxy.from_json(end)}
for start, end
in filter(None, self.discussion_blackouts)
]
for blackout in ret:
if not blackout["start"] or not blackout["end"]:
raise ValueError
return ret
except (TypeError, ValueError):
log.exception(
"Error parsing discussion_blackouts %s for course %s",
self.discussion_blackouts,
self.id
)
return []
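# Illustrative input/output: with discussion_blackouts set to
# [["2015-09-15", "2015-09-21"]], this returns
# [{"start": <datetime 2015-09-15>, "end": <datetime 2015-09-21>}].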
@property
def forum_posts_allowed(self):
"""
Return whether forum posts are allowed by the discussion_blackouts
setting
"""
blackouts = self.get_discussion_blackout_datetimes()
now = datetime.now(UTC())
for blackout in blackouts:
if blackout["start"] <= now <= blackout["end"]:
return False
return True
@property
def number(self):
"""
Returns this course's number.
This is a "number" in the sense of the "course numbers" that you see at
lots of universities. For example, given a course
"Intro to Computer Science" with the course key "edX/CS-101/2014", the
course number would be "CS-101"
"""
return course_metadata_utils.number_for_course_location(self.location)
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.number
@property
def org(self):
return self.location.org
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that is in the location
"""
if self.display_organization:
return self.display_organization
return self.org
@property
def video_pipeline_configured(self):
"""
Returns whether the video pipeline advanced setting is configured for this course.
"""
return (
self.video_upload_pipeline is not None and
'course_video_upload_token' in self.video_upload_pipeline
)
def clean_id(self, padding_char='='):
"""
Returns a unique deterministic base32-encoded ID for the course.
The optional padding_char parameter allows you to override the "=" character used for padding.
"""
return course_metadata_utils.clean_course_key(self.location.course_key, padding_char)
@property
def teams_enabled(self):
"""
Returns whether or not teams has been enabled for this course.
Currently, teams are considered enabled when at least one topic has been configured for the course.
"""
if self.teams_configuration:
return len(self.teams_configuration.get('topics', [])) > 0
return False
@property
def teams_max_size(self):
"""
Returns the max size for teams if teams has been configured, else None.
"""
return self.teams_configuration.get('max_team_size', None)
@property
def teams_topics(self):
"""
Returns the topics that have been configured for teams for this course, else None.
"""
return self.teams_configuration.get('topics', None)
|
nagyistoce/edx-platform
|
common/lib/xmodule/xmodule/course_module.py
|
Python
|
agpl-3.0
| 62,216
|
[
"VisIt"
] |
0efb137ef016c17ffdbe04ff018a905b2b01119cbf29a3402a799cbbf300633e
|
"""
@file outputs.py
@author Yun-Pang Wang
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-12-25
@version $Id: outputs.py 12595 2012-08-24 14:07:33Z dkrajzew $
This script is for generating the outputs from the chosen traffic assignment.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, random, string, sys, datetime, operator, math
from network import Net
from elements import Vehicle
# calculate the time for reading the input data (matrix data are excluded.)
def timeForInput(inputreaderstart):
fouttime = file('timeforinput.txt', 'w')
inputreadtime = datetime.datetime.now() - inputreaderstart
fouttime.write('Time for reading input files:%s\n' %inputreadtime)
fouttime.close()
# output the input matrices, origins, destinations and the number of OD pairs (with demand > 0)
def outputODZone(startVertices, endVertices, Pshort_EffCells, MatrixCounter):
foutmatrixstart = file('origins.txt', 'a')
foutmatrixend = file('destinations.txt', 'a')
foutmatrixstart.write('Interval =%s\n' %(MatrixCounter))
foutmatrixstart.write('number of origins=%s\n' %len(startVertices))
foutmatrixstart.write('number of effective OD cells for Passenger vehicles=%s\n' %(Pshort_EffCells))
# foutmatrixstart.write('number of effective OD cells for Passenger vehicles(long distances)=%s\n' %(Plong_EffCells))
# foutmatrixstart.write('number of effective OD cells for trucks=%s\n' %(Truck_EffCells))
for i in range (0, len(startVertices)):
foutmatrixstart.write('%s\n' %startVertices[i])
foutmatrixend.write('number of destinations=%s\n' %len(endVertices))
for j in range (0, len(endVertices)):
foutmatrixend.write('%s\n' %endVertices[j])
foutmatrixstart.close()
foutmatrixend.close()
# output the network data which is based on the SUMO-network
def outputNetwork(net):
foutnet = file('network.txt', 'w')
net.printNet(foutnet)
foutnet.close()
# output the required CPU time for the assignment and the assignment results (e.g. link flows, link travel times)
def outputStatistics(net, starttime, periods):
totaltime = 0.0
totalflow = 0.0
assigntime = datetime.datetime.now() - starttime
foutMOE = file('MOE.txt', 'w')
foutMOE.write('Number of analyzed periods(hr):%s' %periods)
for edge in net._edges: # generate the output of the link travel times
if edge.estcapacity > 0.:
totaltime += edge.flow * edge.actualtime
totalflow += edge.flow
foutMOE.write('\nedge:%s \t from:%s \t to:%s \t freeflowtime(s):%2.2f \t traveltime(s):%2.2f \t traffic flow(veh):%2.2f \t v/c:%2.2f' \
%(edge._id, edge._from, edge._to, edge.freeflowtime, edge.actualtime, edge.flow, (edge.flow/edge.estcapacity)))
if edge.flow > edge.estcapacity and edge.connection == 0:
foutMOE.write('****overflow!')
avetime = totaltime / totalflow
foutMOE.write('\nTotal flow(veh):%2.2f \t average travel time(s):%2.2f\n' %(totalflow, avetime))
foutMOE.write('\nTime for the traffic assignment and reading matrices:%s' %assigntime)
foutMOE.close()
return assigntime
# output the releasing time and the route for each vehicle
def sortedVehOutput(vehicles, departtime, options, foutroute):
random.seed(42)
for veh in vehicles:
if veh.depart == 0:
veh.depart = random.randint(departtime, departtime + 3600*options.hours)
vehicles.sort(key=operator.attrgetter('depart')) # sorting by departure times
for veh in vehicles: # output the generated routes
foutroute.write(' <vehicle id="%s" depart="%d" departLane="free">\n' %(veh.label, veh.depart))
foutroute.write(' <route>')
for edge in veh.route[1:-1]: # for generating vehicle routes used in SUMO
foutroute.write('%s ' % edge._id)
foutroute.write('</route>\n')
foutroute.write(' </vehicle>\n')
# output the result of the matrix estimation with the traffic counts
def outputMatrix(startVertices, endVertices, estMatrix, daytimeindex):
filename = 'estimatedMatrix-' + daytimeindex + '.fma'
foutmtx = file(filename, 'w')
foutmtx.write('$VMR;D2;estimated with the generalized least squares model\n')
foutmtx.write('* Verkehrsmittelkennung\n')
foutmtx.write(' 1\n')
foutmtx.write('* Von Bis\n\n')
foutmtx.write('* Faktor\n')
foutmtx.write('1.00\n')
foutmtx.write('*\n')
foutmtx.write('* Deutsches Zentrum fuer Luft- und Raumfahrt e.V.\n')
foutmtx.write('* %s\n' %datetime.datetime.now())
foutmtx.write('* Anzahl Bezirke\n')
foutmtx.write('%s\n' %len(startVertices))
foutmtx.write('*\n')
for startVertex in startVertices:
foutmtx.write('%s ' %startVertex.label)
foutmtx.write('\n*')
for start, startVertex in enumerate(startVertices):
count = -1
foutmtx.write('\n* from: %s\n' %startVertex.label)
for end, endVertex in enumerate(endVertices):
count += 1
if operator.mod(count,12) != 0:
foutmtx.write('%s ' %estMatrix[start][end])
else:
foutmtx.write('\n%s '%estMatrix[start][end])
foutmtx.close()
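# A minimal usage sketch of outputMatrix (hypothetical stub data, not part of
# the original tool); any object with a 'label' attribute works as a vertex:
#
#     class _Vertex:
#         def __init__(self, label):
#             self.label = label
#
#     origins = [_Vertex('1'), _Vertex('2')]
#     destinations = [_Vertex('1'), _Vertex('2')]
#     estMatrix = [[0., 12.5], [7.3, 0.]]
#     outputMatrix(origins, destinations, estMatrix, 'MR8')
#     # -> writes 'estimatedMatrix-MR8.fma' in VISUM .fma text format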
def linkChoicesOutput(net, startVertices, endVertices, matrixPshort, linkChoiceMap, odPairsMap, outputdir, starttime):
foutchoice = file(os.path.join(outputdir, "linkchoices.xml"), 'w')
print >> foutchoice, """<?xml version="1.0"?>
<!-- generated on %s by $Id: outputs.py 12595 2012-08-24 14:07:33Z dkrajzew $ -->
<edgechoices>""" % starttime
for e in net._detectedEdges:
        if len(linkChoiceMap[e.detected]) > 0:
foutchoice.write(' <edge id="%s" flows="%s">\n' %(e.label, e.flow))
foutchoice.write(' <choiceprobs>\n')
for start, startVertex in enumerate(startVertices):
for end, endVertex in enumerate(endVertices):
if startVertex.label != endVertex.label and matrixPshort[start][end] > 0.:
odIndex = odPairsMap[startVertex.label][endVertex.label]
foutchoice.write(' <choice origin="%s" destination="%s" choiceprob="%.5f"/>\n' \
%(startVertex.label, endVertex.label, linkChoiceMap[e.detected][odIndex]/matrixPshort[start][end]))
foutchoice.write(' </choiceprobs>\n')
foutchoice.write(' </edge>\n')
foutchoice.write('</edgechoices>\n')
foutchoice.close()
|
rudhir-upretee/Sumo_With_Netsim
|
tools/assign/outputs.py
|
Python
|
gpl-3.0
| 6,838
|
[
"MOE"
] |
083e0099ae1da68e706a99d0ed0b3b59285d4fb7eb57b8bfe4afa4d8b4613cf8
|
import os
import distutils.spawn
import external.cclib as cclib
import itertools
import logging
from subprocess import Popen
import re
from rmgpy.molecule import Molecule
from qmdata import parseCCLibData
from molecule import QMMolecule
class Gaussian:
"""
A base class for all QM calculations that use Gaussian.
Classes such as :class:`GaussianMol` will inherit from this class.
"""
inputFileExtension = '.gjf'
outputFileExtension = '.log'
executablesToTry = ('g09', 'g03')
for exe in executablesToTry:
        try:
            executablePath = distutils.spawn.find_executable(exe)
        except Exception:  # find_executable may fail, e.g. when PATH is unset
            executablePath = None
if executablePath is not None:
break
else: # didn't break
logging.debug("Did not find Gaussian on path, checking if it exists in a declared GAUSS_EXEDIR, g09root or g03root...")
gaussEnv = os.getenv('GAUSS_EXEDIR') or os.getenv('g09root') or os.getenv('g03root') or ""
        possibleDirs = gaussEnv.split(':')  # GAUSS_EXEDIR may be a list like "path1:path2:path3"
for exe, possibleDir in itertools.product(executablesToTry, possibleDirs):
executablePath = os.path.join(possibleDir, exe)
if os.path.exists(executablePath):
break
else: # didn't break
executablePath = os.path.join(gaussEnv , '(Gaussian 2003 or 2009)')
usePolar = False
    #: List of phrases that indicate failure.
    #: NONE of these must be present in a successful job.
failureKeys = [
'ERROR TERMINATION',
'IMAGINARY FREQUENCIES'
]
#: List of phrases to indicate success.
#: ALL of these must be present in a successful job.
successKeys = [
'Normal termination of Gaussian'
]
def testReady(self):
if not os.path.exists(self.executablePath):
raise Exception("Couldn't find Gaussian executable at {0}. Try setting your GAUSS_EXEDIR environment variable.".format(self.executablePath))
def run(self):
self.testReady()
# submits the input file to Gaussian
process = Popen([self.executablePath, self.inputFilePath, self.outputFilePath])
        process.communicate()  # necessary to wait for executable termination!
return self.verifyOutputFile()
def verifyOutputFile(self):
"""
        Checks that an output file exists and was successful.
Returns a boolean flag that states whether a successful GAUSSIAN simulation already exists for the molecule with the
given (augmented) InChI Key.
The definition of finding a successful simulation is based on these criteria:
1) finding an output file with the file name equal to the InChI Key
        2) NOT finding any of the keywords that denote a calculation failure
3) finding all the keywords that denote a calculation success.
4) finding a match between the InChI of the given molecule and the InchI found in the calculation files
5) checking that the optimized geometry, when connected by single bonds, is isomorphic with self.molecule (converted to single bonds)
If any of the above criteria is not matched, False will be returned.
If all are satisfied, it will return True.
"""
if not os.path.exists(self.outputFilePath):
logging.info("Output file {0} does not exist.".format(self.outputFilePath))
return False
        InChIMatch = False  # flag indicating whether the InChI in the file matches InChIaug; can only be True if InChIFound is also True
        InChIFound = False  # flag indicating whether an InChI was found in the log file
# Initialize dictionary with "False"s
successKeysFound = dict([(key, False) for key in self.successKeys])
with open(self.outputFilePath) as outputFile:
for line in outputFile:
line = line.strip()
for element in self.failureKeys: #search for failure keywords
if element in line:
logging.error("Gaussian output file contains the following error: {0}".format(element) )
return False
for element in self.successKeys: #search for success keywords
if element in line:
successKeysFound[element] = True
if line.startswith("InChI="):
logFileInChI = line #output files should take up to 240 characters of the name in the input file
InChIFound = True
if self.uniqueIDlong in logFileInChI:
InChIMatch = True
elif self.uniqueIDlong.startswith(logFileInChI):
logging.info("InChI too long to check, but beginning matches so assuming OK.")
InChIMatch = True
else:
logging.warning("InChI in log file ({0}) didn't match that in geometry ({1}).".format(logFileInChI, self.geometry.uniqueIDlong))
if self.geometry.uniqueIDlong.startswith(logFileInChI):
logging.warning("but the beginning matches so it's probably just a truncation problem.")
InChIMatch = True
# Check that ALL 'success' keywords were found in the file.
if not all( successKeysFound.values() ):
logging.error('Not all of the required keywords for success were found in the output file!')
return False
if not InChIFound:
logging.error("No InChI was found in the Gaussian output file {0}".format(self.outputFilePath))
return False
if not InChIMatch:
#InChIs do not match (most likely due to limited name length mirrored in log file (240 characters), but possibly due to a collision)
return self.checkForInChiKeyCollision(logFileInChI) # Not yet implemented!
# Compare the optimized geometry to the original molecule
qmData = self.parse()
cclibMol = Molecule()
cclibMol.fromXYZ(qmData.atomicNumbers, qmData.atomCoords.value)
testMol = self.molecule.toSingleBonds()
if not cclibMol.isIsomorphic(testMol):
logging.info("Incorrect connectivity for optimized geometry in file {0}".format(self.outputFilePath))
return False
logging.info("Successful {1} quantum result in {0}".format(self.outputFilePath, self.__class__.__name__))
return True
def parse(self):
"""
Parses the results of the Gaussian calculation, and returns a QMData object.
"""
parser = cclib.parser.Gaussian(self.outputFilePath)
parser.logger.setLevel(logging.ERROR) #cf. http://cclib.sourceforge.net/wiki/index.php/Using_cclib#Additional_information
cclibData = parser.parse()
radicalNumber = sum([i.radicalElectrons for i in self.molecule.atoms])
qmData = parseCCLibData(cclibData, radicalNumber+1)
return qmData
class GaussianMol(QMMolecule, Gaussian):
"""
A base Class for calculations of molecules using Gaussian.
Inherits from both :class:`QMMolecule` and :class:`Gaussian`.
"""
def inputFileKeywords(self, attempt):
"""
Return the top keywords for attempt number `attempt`.
NB. `attempt` begins at 1, not 0.
"""
assert attempt <= self.maxAttempts
if attempt > self.scriptAttempts:
attempt -= self.scriptAttempts
return self.keywords[attempt-1]
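    # Example with hypothetical numbers: if scriptAttempts is 18 and
    # maxAttempts is 36, then attempt 20 wraps around to attempt 2 and
    # returns self.keywords[1].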
def writeInputFile(self, attempt):
"""
Using the :class:`Geometry` object, write the input file
for the `attempt`.
"""
molfile = self.getMolFilePathForCalculation(attempt)
        atomline = re.compile(r'\s*([\- ][0-9.]+\s+[\-0-9.]+\s+[\-0-9.]+)\s+([A-Za-z]+)')
output = ['', self.geometry.uniqueIDlong, '' ]
output.append("{charge} {mult}".format(charge=0, mult=(self.molecule.getRadicalCount() + 1) ))
atomCount = 0
with open(molfile) as molinput:
for line in molinput:
match = atomline.match(line)
if match:
output.append("{0:8s} {1}".format(match.group(2), match.group(1)))
atomCount += 1
assert atomCount == len(self.molecule.atoms)
output.append('')
input_string = '\n'.join(output)
top_keys = self.inputFileKeywords(attempt)
with open(self.inputFilePath, 'w') as gaussianFile:
gaussianFile.write(top_keys)
gaussianFile.write('\n')
gaussianFile.write(input_string)
gaussianFile.write('\n')
if self.usePolar:
gaussianFile.write('\n\n\n')
raise NotImplementedError("Not sure what should be here, if anything.")
#gaussianFile.write(polar_keys)
def generateQMData(self):
"""
Calculate the QM data and return a QMData object.
"""
for atom in self.molecule.vertices:
if atom.atomType.label in ('N5s', 'N5d', 'N5dd', 'N5t', 'N5b'):
return None
if self.verifyOutputFile():
logging.info("Found a successful output file already; using that.")
source = "QM {0} calculation found from previous run.".format(self.__class__.__name__)
else:
self.createGeometry()
success = False
for attempt in range(1, self.maxAttempts+1):
self.writeInputFile(attempt)
logging.info('Trying {3} attempt {0} of {1} on molecule {2}.'.format(attempt, self.maxAttempts, self.molecule.toSMILES(), self.__class__.__name__))
success = self.run()
if success:
logging.info('Attempt {0} of {1} on species {2} succeeded.'.format(attempt, self.maxAttempts, self.molecule.toAugmentedInChI()))
source = "QM {0} calculation attempt {1}".format(self.__class__.__name__, attempt )
break
else:
logging.error('QM thermo calculation failed for {0}.'.format(self.molecule.toAugmentedInChI()))
return None
result = self.parse() # parsed in cclib
result.source = source
return result # a CCLibData object
def getParser(self, outputFile):
"""
Returns the appropriate cclib parser.
"""
return cclib.parser.Gaussian(outputFile)
class GaussianMolPM3(GaussianMol):
"""
Gaussian PM3 calculations for molecules
This is a class of its own in case you wish to do anything differently,
but for now it's only the 'pm3' in the keywords that differs.
"""
#: Keywords that will be added at the top of the qm input file
keywords = [
# The combinations of keywords were derived by Greg Magoon for pm3 in Gaussian. His comments are attached to each combination.
"# pm3 opt=(verytight,gdiis) freq IOP(2/16=3)", # added IOP option to avoid aborting when symmetry changes; 3 is supposed to be default according to documentation, but it seems that 0 (the default) is the only option that doesn't work from 0-4; also, it is interesting to note that all 4 options seem to work for test case with z-matrix input rather than xyz coords; cf. http://www.ccl.net/cgi-bin/ccl/message-new?2006+10+17+005 for original idea for solution
"# pm3 opt=(verytight,gdiis) freq IOP(2/16=3) IOP(4/21=2)", # use different SCF method; this addresses at least one case of failure for a C4H7J species
"# pm3 opt=(verytight,calcfc,maxcyc=200) freq IOP(2/16=3) nosymm" , # try multiple different options (no gdiis, use calcfc, nosymm); 7/21/09: added maxcyc option to fix case of MPTBUKVAJYJXDE-UHFFFAOYAPmult3 (InChI=1/C4H10O5Si/c1-3-7-9-10(5,6)8-4-2/h4-5H,3H2,1-2H3/mult3) (file manually copied to speed things along)
"# pm3 opt=(verytight,calcfc,maxcyc=200) freq=numerical IOP(2/16=3) nosymm", # numerical frequency keyword version of keyword #3; used to address GYFVJYRUZAKGFA-UHFFFAOYALmult3 (InChI=1/C6H14O6Si/c1-3-10-13(8,11-4-2)12-6-5-9-7/h6-7H,3-5H2,1-2H3/mult3) case; (none of the existing Gaussian or MOPAC combinations worked with it)
"# pm3 opt=(verytight,gdiis,small) freq IOP(2/16=3)", # somehow, this worked for problematic case of ZGAWAHRALACNPM-UHFFFAOYAF (InChI=1/C8H17O5Si/c1-3-11-14(10,12-4-2)13-8-5-7(9)6-8/h7-9H,3-6H2,1-2H3); (was otherwise giving l402 errors); even though I had a keyword that worked for this case, I manually copied the fixed log file to QMfiles folder to speed things along; note that there are a couple of very low frequencies (~5-6 cm^-1 for this case)
"# pm3 opt=(verytight,nolinear,calcfc,small) freq IOP(2/16=3)", # used for troublesome C5H7J2 case (similar error to C5H7J below); calcfc is not necessary for this particular species, but it speeds convergence and probably makes it more robust for other species
"# pm3 opt=(verytight,gdiis,maxcyc=200) freq=numerical IOP(2/16=3)", # use numerical frequencies; this takes a relatively long time, so should only be used as one of the last resorts; this seemed to address at least one case of failure for a C6H10JJ species; 7/15/09: maxcyc=200 added to address GVCMURUDAUQXEY-UHFFFAOYAVmult3 (InChI=1/C3H4O7Si/c1-2(9-6)10-11(7,8)3(4)5/h6-7H,1H2/mult3)...however, result was manually pasted in QMfiles folder to speed things along
"# pm3 opt=tight freq IOP(2/16=3)", # this worked for problematic case of SZSSHFMXPBKYPR-UHFFFAOYAF (InChI=1/C7H15O5Si/c1-3-10-13(8,11-4-2)12-7-5-6-9-7/h7H,3-6H2,1-2H3) (otherwise, it had l402.exe errors); corrected log file was manually copied to QMfiles to speed things along; we could also add a freq=numerical version of this keyword combination for added robustness; UPDATE: see below
"# pm3 opt=tight freq=numerical IOP(2/16=3)", # used for problematic case of CIKDVMUGTARZCK-UHFFFAOYAImult4 (InChI=1/C8H15O6Si/c1-4-12-15(10,13-5-2)14-7-6-11-8(7,3)9/h7H,3-6H2,1-2H3/mult4 (most other cases had l402.exe errors); corrected log file was manually copied to QMfiles to speed things along
"# pm3 opt=(tight,nolinear,calcfc,small,maxcyc=200) freq IOP(2/16=3)", # similar to existing #5, but uses tight rather than verytight; used for ADMPQLGIEMRGAT-UHFFFAOYAUmult3 (InChI=1/C6H14O5Si/c1-4-9-12(8,10-5-2)11-6(3)7/h6-7H,3-5H2,1-2H3/mult3)
"# pm3 opt freq IOP(2/16=3)", # use default (not verytight) convergence criteria; use this as last resort
"# pm3 opt=(verytight,gdiis) freq=numerical IOP(2/16=3) IOP(4/21=200)", # to address problematic C10H14JJ case
"# pm3 opt=(calcfc,verytight,newton,notrustupdate,small,maxcyc=100,maxstep=100) freq=(numerical,step=10) IOP(2/16=3) nosymm", # for very troublesome RRMZRNPRCUANER-UHFFFAOYAQ (InChI=1/C5H7/c1-3-5-4-2/h3H,1-2H3) case...there were troubles with negative frequencies, where I don't think they should have been; step size of numerical frequency was adjusted to give positive result; accuracy of result is questionable; it is possible that not all of these keywords are needed; note that for this and other nearly free rotor cases, I think heat capacity will be overestimated by R/2 (R vs. R/2) (but this is a separate issue)
"# pm3 opt=(tight,gdiis,small,maxcyc=200,maxstep=100) freq=numerical IOP(2/16=3) nosymm", # for troublesome QDERTVAGQZYPHT-UHFFFAOYAHmult3(InChI=1/C6H14O4Si/c1-4-8-11(7,9-5-2)10-6-3/h4H,5-6H2,1-3H3/mult3); key aspects appear to be tight (rather than verytight) convergence criteria, no calculation of frequencies during optimization, use of numerical frequencies, and probably also the use of opt=small
"# pm3 opt=(verytight,gdiis,calcall) IOP(2/16=3)", # used for troublesome C5H7J case; note that before fixing, I got errors like the following: "Incomplete coordinate system. Try restarting with Geom=Check Guess=Read Opt=(ReadFC,NewRedundant) Incomplete coordinate system. Error termination via Lnk1e in l103.exe"; we could try to restart, but it is probably preferrable to have each keyword combination standalone; another keyword that may be helpful if additional problematic cases are encountered is opt=small; 6/9/09 note: originally, this had # pm3 opt=(verytight,gdiis,calcall) freq IOP(2/16=3)" (with freq keyword), but I discovered that in this case, there are two thermochemistry sections and cclib parses frequencies twice, giving twice the number of desired frequencies and hence produces incorrect thermo; this turned up on C5H6JJ isomer
"# pm3 opt=(verytight,gdiis,calcall,small,maxcyc=200) IOP(2/16=3) IOP(4/21=2) nosymm", # worked for troublesome ketene case: CCGKOQOJPYTBIH-UHFFFAOYAO (InChI=1/C2H2O/c1-2-3/h1H2) (could just increase number of iterations for similar keyword combination above (#6 at the time of this writing), allowing symmetry, but nosymm seemed to reduce # of iterations; I think one of nosymm or higher number of iterations would allow the similar keyword combination to converge; both are included here for robustness)
"# pm3 opt=(verytight,gdiis,calcall,small) IOP(2/16=3) nosymm", # added for case of ZWMVZWMBTVHPBS-UHFFFAOYAEmult3 (InChI=1/C4H4O2/c1-3-5-6-4-2/h1-2H2/mult3)
"# pm3 opt=(calcall,small,maxcyc=100) IOP(2/16=3)", # used to address troublesome FILUFGAZMJGNEN-UHFFFAOYAImult3 case (InChI=1/C5H6/c1-3-5-4-2/h3H,1H2,2H3/mult3)
]
class GaussianMolPM6(GaussianMol):
"""
Gaussian PM6 calculations for molecules
This is a class of its own in case you wish to do anything differently,
but for now it's only the 'pm6' in the keywords that differs.
"""
#: Keywords that will be added at the top of the qm input file
keywords = [
# The combinations of keywords were derived by Greg Magoon for pm3. For now, we assume similar ones will work for pm6:
"# pm6 opt=(verytight,gdiis) freq IOP(2/16=3)",
"# pm6 opt=(verytight,gdiis) freq IOP(2/16=3) IOP(4/21=2)",
"# pm6 opt=(verytight,calcfc,maxcyc=200) freq IOP(2/16=3) nosymm" ,
"# pm6 opt=(verytight,calcfc,maxcyc=200) freq=numerical IOP(2/16=3) nosymm",
"# pm6 opt=(verytight,gdiis,small) freq IOP(2/16=3)",
"# pm6 opt=(verytight,nolinear,calcfc,small) freq IOP(2/16=3)",
"# pm6 opt=(verytight,gdiis,maxcyc=200) freq=numerical IOP(2/16=3)",
"# pm6 opt=tight freq IOP(2/16=3)",
"# pm6 opt=tight freq=numerical IOP(2/16=3)",
"# pm6 opt=(tight,nolinear,calcfc,small,maxcyc=200) freq IOP(2/16=3)",
"# pm6 opt freq IOP(2/16=3)",
"# pm6 opt=(verytight,gdiis) freq=numerical IOP(2/16=3) IOP(4/21=200)",
"# pm6 opt=(calcfc,verytight,newton,notrustupdate,small,maxcyc=100,maxstep=100) freq=(numerical,step=10) IOP(2/16=3) nosymm",
"# pm6 opt=(tight,gdiis,small,maxcyc=200,maxstep=100) freq=numerical IOP(2/16=3) nosymm",
"# pm6 opt=(verytight,gdiis,calcall) IOP(2/16=3)",
"# pm6 opt=(verytight,gdiis,calcall,small,maxcyc=200) IOP(2/16=3) IOP(4/21=2) nosymm",
"# pm6 opt=(verytight,gdiis,calcall,small) IOP(2/16=3) nosymm",
"# pm6 opt=(calcall,small,maxcyc=100) IOP(2/16=3)",
]
|
pierrelb/RMG-Py
|
rmgpy/qm/gaussian.py
|
Python
|
mit
| 19,822
|
[
"Gaussian",
"MOPAC",
"cclib"
] |
84b98cd79b46f33a84e451dfe99dd90a32372d79d0328b1c8bafd88629d39502
|
from sympy import *
from sympy.parsing.sympy_parser import parse_expr, standard_transformations, convert_xor, auto_number
from sympy.printing.latex import LatexPrinter
import re
import ANNarchy.core.Global as Global
from ANNarchy.core.Random import RandomDistribution
from ..Extraction import *
from ANNarchy.parser.AnalyseSynapse import analyse_synapse
##################################
### Process individual equations
##################################
def _process_random(val):
"Transforms a connector attribute (weights, delays) into a string representation"
if isinstance(val, RandomDistribution):
return val.latex()
else:
return str(val)
# Workaround: when a target name contains a digit (e.g. ff1), sympy parses
# the trailing 1 as a number, so each target is replaced by a placeholder
# word before parsing.
target_replacements = [
'firsttarget',
'secondtarget',
'thirdtarget',
'fourthtarget',
'fifthtarget',
'sixthtarget',
'seventhtarget',
'eighthtarget',
'ninthtarget',
'tenthtarget',
]
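# Illustration with a hypothetical equation: in
#     tau*dr/dt + r = sum(ff1)
# the substring 'sum(ff1)' is swapped for the placeholder 'firsttarget'
# before the string is handed to sympy, and swapped back to a LaTeX sum
# after parsing (see _process_neuron_equations below).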
def _process_neuron_equations(neuron):
code = ""
# Extract parameters and variables
parameters = extract_parameters(neuron.parameters, neuron.extra_values)
variables = extract_variables(neuron.equations)
variable_names = [var['name'] for var in variables]
attributes, local_var, semiglobal_var, global_var = get_attributes(parameters, variables, neuron=True)
# Create a dictionary for parsing
local_dict = {
'g_target': Symbol('g_{\\text{target}}'),
'dt': Symbol('\Delta t'),
't_pre': Symbol('t_{\\text{pre}}'),
't_post': Symbol('t_{\\text{pos}}'),
'Uniform': Function('\mathcal{U}'),
'Normal': Function('\mathcal{N}'),
'ite': Function('ite', nargs=3)
}
for att in attributes:
local_dict[att] = Symbol(_latexify_name(att, variable_names))
tex_dict = {}
for key, val in local_dict.items():
tex_dict[val] = str(val)
for var in variables:
# Retrieve the equation
eq = var['eq']
# Extract sum(target)
targets = []
target_list = re.findall('(?P<pre>[^\w.])sum\(\s*([^()]+)\s*\)', eq)
for l, t in target_list:
if t.strip() == '':
continue
replacement = target_replacements[len(targets)]
targets.append((t.strip(), replacement))
local_dict[replacement] = Symbol(replacement)
tex_dict[replacement] = replacement
for target, repl in targets:
eq = eq.replace('sum('+target+')', repl)
# Parse the equation
ode = re.findall(r'([^\w]*)d([\w]+)/dt', eq)
if len(ode) > 0:
name = ode[0][1]
eq = eq.replace('d'+name+'/dt', '_grad_'+name)
grad_symbol = Symbol('\\frac{d'+_latexify_name(name, variable_names)+'}{dt}')
local_dict['_grad_'+name] = grad_symbol
tex_dict[grad_symbol] = '\\frac{d'+_latexify_name(name, variable_names)+'}{dt}'
var_code = _analyse_equation(var['eq'], eq, local_dict, tex_dict)
# Replace the targets
for target, repl in targets:
target = target.replace("_","\_")
var_code = var_code.replace(repl, '\\sum_{\\text{'+target+'}} w \cdot r^{\\text{pre}}(t-d)')
# Add the code
var['latex'] = var_code
var['ode'] = len(ode) > 0
if not neuron.spike: # rate-code, no spike
return variables, "", []
# Additional code for spiking neurons
spike_condition = _analyse_part(neuron.spike, local_dict, tex_dict)
# Reset
spike_reset = []
reset_vars = extract_variables(neuron.reset)
for var in reset_vars:
eq = var['eq']
spike_reset.append(_analyse_equation(var['eq'], eq, local_dict, tex_dict))
return variables, spike_condition, spike_reset
def _process_synapse_equations(synapse):
psp = ""
code = ""
pre_event = []
post_event = []
# Extract parameters and variables
parameters = extract_parameters(synapse.parameters)
variables = extract_variables(synapse.equations)
variable_names = [var['name'] for var in variables]
attributes, local_var, semiglobal_var, global_var = get_attributes(parameters, variables, neuron=False)
# Create a dictionary for parsing
local_dict = {
'w': Symbol('w(t)'),
'dt': Symbol('\Delta t'),
'g_target': Symbol('g_{\\text{target}(t)}'),
't_pre': Symbol('t_{\\text{pre}}'),
't_post': Symbol('t_{\\text{pos}}'),
'Uniform': Function('\mathcal{U}'),
'Normal': Function('\mathcal{N}'),
'ite': Function('ite', nargs=3)
}
for att in attributes:
local_dict[att] = Symbol(_latexify_name(att, variable_names))
tex_dict = {}
for key, val in local_dict.items():
tex_dict[val] = str(val)
# PSP
if synapse.psp:
psp, untouched_var, dependencies = extract_prepost('psp', synapse.psp.strip(), synapse.description)
for dep in dependencies['post']:
local_dict['_post_'+dep+'__'] = Symbol("{" + dep + "^{\\text{post}}}(t)")
for dep in dependencies['pre']:
local_dict['_pre_'+dep+'__'] = Symbol("{" + dep + "^{\\text{pre}}}(t-d)")
if synapse.type == 'rate':
psp = _analyse_part(psp, local_dict, tex_dict)
else:
psp = "g_\\text{target}(t) \mathrel{+}= " + _analyse_part(psp, local_dict, tex_dict)
else:
if synapse.type == 'rate':
psp = "w(t) \cdot r^{\\text{pre}}(t)"
else:
psp = ""
# Variables
for var in variables:
# Retrieve the equation
eq = var['eq']
# pre/post variables
targets=[]
eq, untouched_var, dependencies = extract_prepost(var['name'], eq, synapse.description)
for dep in dependencies['post']:
if dep.startswith('sum('):
target = re.findall(r'sum\(([\w]+)\)', dep)[0]
targets.append(target)
local_dict['_post_sum_'+target] = Symbol('PostSum'+target)
else:
local_dict['_post_'+dep+'__'] = Symbol("{{" + _latexify_name(dep, variable_names) + "}^{\\text{post}}}(t)")
for dep in dependencies['pre']:
if dep.startswith('sum('):
target = re.findall(r'sum\(([\w]+)\)', dep)[0]
targets.append(target)
local_dict['_pre_sum_'+target] = Symbol('PreSum'+target)
else:
local_dict['_pre_'+dep+'__'] = Symbol("{" + dep + "^{\\text{pre}}}(t-d)")
# Parse the equation
#eq = eq.replace(' ', '') # supress spaces
ode = re.findall(r'([^\w]*)d([\w]+)/dt', eq)
if len(ode) > 0:
name = ode[0][1]
eq = eq.replace('d'+name+'/dt', '_grad_'+name)
grad_symbol = Symbol('\\frac{d'+_latexify_name(name, variable_names)+'}{dt}')
local_dict['_grad_'+name] = grad_symbol
tex_dict[grad_symbol] = '\\frac{d'+_latexify_name(name, variable_names)+'}{dt}'
# Analyse
var_code = _analyse_equation(var['eq'], eq, local_dict, tex_dict)
# replace targets
for target in targets:
var_code = var_code.replace('PostSum'+target, "(\\sum_{\\text{" + target + "}} \\text{psp}(t))^{\\text{post}}")
var_code = var_code.replace('PreSum'+target, "(\\sum_{\\text{" + target + "}} \\text{psp}(t))^{\\text{pre}}")
# Add the code
var['latex'] = var_code
var['ode'] = len(ode) > 0
# Pre-event
if synapse.type == 'spike':
desc = analyse_synapse(synapse)
for var in extract_pre_spike_variable(desc):
eq = var['eq']
# pre/post variables
eq, untouched_var, dependencies = extract_prepost(var['name'], eq, desc)
for dep in dependencies['post']:
local_dict['_post_'+dep+'__'] = Symbol("{" + dep + "^{\\text{post}}}(t)")
for dep in dependencies['pre']:
local_dict['_pre_'+dep+'__'] = Symbol("{" + dep + "^{\\text{pre}}}(t)")
pre_event.append(_analyse_equation(var['eq'], eq, local_dict, tex_dict))
for var in extract_post_spike_variable(desc):
eq = var['eq']
# pre/post variables
eq, untouched_var, dependencies = extract_prepost(var['name'], eq, desc)
for dep in dependencies['post']:
local_dict['_post_'+dep+'__'] = Symbol("{" + dep + "^{\\text{post}}}(t)")
for dep in dependencies['pre']:
local_dict['_pre_'+dep+'__'] = Symbol("{" + dep + "^{\\text{pre}}}(t)")
post_event.append(_analyse_equation(var['eq'], eq, local_dict, tex_dict))
return psp, variables, pre_event, post_event
def _process_functions(functions, begin="\\begin{dmath*}\n", end="\n\\end{dmath*}"):
code = ""
extracted_functions = extract_functions(functions, False)
for func in extracted_functions:
# arguments
args = func['args']
args_list = ""
for arg in args:
args_list += _latexify_name(arg, []) + ", "
args_list = args_list[:-2]
# local dict
local_dict = {}
for att in args:
local_dict[att] = Symbol(_latexify_name(att, []))
tex_dict = {}
for key, val in local_dict.items():
tex_dict[val] = str(val)
# parse the content
content = _analyse_part(func['content'], local_dict, tex_dict)
# generate the code
code += "%(begin)s%(name)s(%(args)s) = %(content)s%(end)s" % {
'name': _latexify_name(func['name'], []),
'args': args_list,
'content': content.strip(),
'begin': begin,
'end': end }
return code
# Splits an equation into two parts, caring for the increments
def _analyse_equation(orig, eq, local_dict, tex_dict):
# Analyse the left part
left = eq.split('=')[0]
split_idx = len(left)
if left[-1] in ['+', '-', '*', '/']:
op = left[-1]
try:
left = _analyse_part(left[:-1], local_dict, tex_dict)
except Exception as e:
Global._print(e)
Global._warning('can not transform the left side of ' + orig +' to LaTeX, you have to do it by hand...')
left = left[:-1]
operator = " = " + left + " " + op + (" (" if op != '+' else '')
operator = " \mathrel{" + op + "}= "
else:
try:
left = _analyse_part(left, local_dict, tex_dict)
except Exception as e:
Global._print(e)
Global._warning('can not transform the left side of ' + orig +' to LaTeX, you have to do it by hand...')
operator = " = "
# Analyse the right part
try:
right = _analyse_part(eq[split_idx+1:], local_dict, tex_dict)
except Exception as e:
Global._print(e)
Global._warning('can not transform the right side of ' + orig +' to LaTeX, you have to do it by hand...')
right = "\\textbf{TODO} %%" + eq[split_idx+1:]
return left + operator + right + (" )" if operator.strip().endswith('(') else "")
class CustomLatexPrinter(LatexPrinter):
def _print_Function(self, expr, exp=None):
'''
For ite() only
'''
func = expr.func.__name__
args = [ str(self._print(arg)) for arg in expr.args ]
if func == 'ite':
return """\\begin{cases}
%(then_code)s \qquad \\text{if} \quad %(if_code)s \\\\
%(else_code)s \qquad \\text{otherwise.}
\end{cases}""" % {'if_code': args[0], 'then_code': args[1], 'else_code': args[2]}
        elif func in ['positive', 'pos']:
            return "\\left(" + args[0] + "\\right)^+"
        elif func in ['negative', 'neg']:
            return "(" + args[0] + ")^-"
return LatexPrinter._print_Function(self, expr, exp)
# Analyses and transform to latex a single part of an equation
def _analyse_part(expr, local_dict, tex_dict):
def regular_expr(expr):
analysed = parse_expr(
expr,
local_dict = local_dict,
transformations = (standard_transformations + (convert_xor,)),
# transformations = (convert_xor,),
# evaluate=False
)
return CustomLatexPrinter(settings={'symbol_names': tex_dict, 'mul_symbol':"dot"}).doprint(analysed)
def _condition(condition):
return regular_expr(transform_condition(condition))
def _extract_conditional(condition):
if_statement = condition[0]
then_statement = condition[1]
else_statement = condition[2]
# IF condition
if_code = _condition(if_statement)
# THEN
if isinstance(then_statement, list): # nested conditional
then_code = _extract_conditional(then_statement)
else:
then_code = regular_expr(then_statement)
# ELSE
if isinstance(else_statement, list): # nested conditional
else_code = _extract_conditional(else_statement)
else:
else_code = regular_expr(else_statement)
return "\\begin{cases}" + then_code + "\qquad \\text{if} \quad " + if_code + "\\\\ "+ else_code +" \qquad \\text{otherwise.} \end{cases}"
# Replace and/or with sympy relationals
expr = transform_condition(expr)
# Extract if/then/else
if 'else' in expr:
ite_code = extract_ite(expr)
return _extract_conditional(ite_code)
# Return the transformed equation
return regular_expr(expr)
def extract_ite(eq):
"if COND: THEN else: ELSE"
def transform(code):
" Transforms the code into a list of lines."
res = []
items = []
for arg in code.split(':'):
items.append( arg.strip())
for i in range(len(items)):
if items[i].startswith('if '):
res.append( items[i].strip() )
elif items[i].strip().endswith('else'):
res.append(items[i].split('else')[0].strip() )
res.append('else' )
else: # the last then
res.append( items[i].strip() )
return res
def parse(lines):
" Recursive analysis of if-else statements"
result = []
while lines:
if lines[0].startswith('if'):
block = [lines.pop(0).split('if')[1], parse(lines)]
if lines[0].startswith('else'):
lines.pop(0)
block.append(parse(lines))
result.append(block)
            elif not lines[0].startswith('else'):
result.append(lines.pop(0))
else:
break
return result[0]
# Process the equation
multilined = transform(eq)
condition = parse(multilined)
return condition
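# Example, traced from the parsing rules above (note the space left over
# from stripping the leading 'if'):
#
#     extract_ite("if r > 0: 1.0 else: 0.0")
#     # -> [' r > 0', '1.0', '0.0']   (condition, then-branch, else-branch)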
def extract_ite_func(eq):
    "ite(COND, THEN, ELSE)"
    # TODO: not implemented; returns a fixed placeholder condition for now
    condition = ["r>0.0", "1.0", "0.0"]
    return condition
# Latexify names
greek = ['alpha', 'beta', 'gamma', 'epsilon', 'eta', 'kappa', 'delta', 'lambda', 'mu', 'nu', 'zeta', 'sigma', 'phi', 'psi', 'rho', 'omega', 'xi', 'tau',
'Gamma', 'Delta', 'Theta', 'Lambda', 'Xi', 'Phi', 'Psi', 'Omega'
]
def _latexify_name(name, local):
parts = name.split('_')
if len(parts) == 1:
if len(name) == 1:
equiv = name
elif name in greek:
equiv = '\\' + name
else:
equiv = '{\\text{' + name + '}}'
if name in local:
equiv = '{' + equiv + '}(t)'
return equiv
elif len(parts) == 2:
equiv = ""
for p in parts:
if len(p) == 1:
equiv += '' + p + '_'
elif p in greek:
equiv += '\\' + p + '_'
else:
equiv += '{\\text{' + p + '}}' + '_'
equiv = equiv[:-1]
if name in local:
equiv = '{' + equiv + '}(t)'
return equiv
else:
equiv = '{\\text{' + name + '}}'
equiv = equiv.replace('_', '-')
if name in local:
equiv = equiv + '(t)'
return equiv
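# Examples, traced from the rules above:
#     _latexify_name('tau', [])          -> \tau
#     _latexify_name('tau_m', ['tau_m']) -> {\tau_m}(t)
#     _latexify_name('rate', ['rate'])   -> {{\text{rate}}}(t)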
def pop_name(name):
return name.replace('_', '\_')
def _format_list(l, sep):
if not isinstance(l, list):
return l
target_list = ""
for t in l:
target_list += t + sep
return target_list[:-len(sep)]
def transform_condition(expr):
"""
Transforms the "natural" logical operators into Sympy-compatible versions.
"""
    expr = expr.replace(' and ', ' & ')
    expr = expr.replace(' or ', ' | ')
    expr = expr.replace(' is not ', ' != ')
    expr = expr.replace(' not ', ' Not ')
    expr = expr.replace(' not(', ' Not(')
    expr = expr.replace(' is ', ' == ')
return expr
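# Example:
#     transform_condition('r > 0 and not(u > 1)')
#     # -> 'r > 0 & Not(u > 1)'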
|
vitay/ANNarchy
|
ANNarchy/parser/report/LatexParser.py
|
Python
|
gpl-2.0
| 16,861
|
[
"NEURON"
] |
e7d4d2b1c47cf7e71b2907c6e969d3516611d5fb954835a7b5390f10739cca1f
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows how to use validateOnly SOAP header.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Api: AdWordsOnly
"""
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service with validate only flag enabled.
client.validate_only = True
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201506')
# Construct operations to add a text ad.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'finalUrls': {
'urls': ['http://www.example.com']
},
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
}
}
}]
ad_group_ad_service.mutate(operations)
# No error means the request is valid.
# Now let's check an invalid ad using a very long line to trigger an error.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for all astronauts in orbit',
'headline': 'Luxury Cruise to Mars'
}
}
}]
try:
ad_group_ad_service.mutate(operations)
except suds.WebFault, e:
print 'Validation correctly failed with \'%s\'.' % str(e)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
wubr2000/googleads-python-lib
|
examples/adwords/v201506/campaign_management/validate_text_ad.py
|
Python
|
apache-2.0
| 2,766
|
[
"VisIt"
] |
bd1eea17d886f6aecc4d11071fa0e0845db9589da1260ab64f90322819a65ec7
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('nl', gettext_noop('Dutch')),
('nb', gettext_noop('Norwegian Bokmal')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to the current locale
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
# Legacy format
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# New format
DATABASES = {
}
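# A hypothetical example of the new format (placeholder values):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': '/path/to/db.sqlite3',
#     }
# }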
# Classes used to implement db routing behaviour
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is a admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported when SEND_BROKEN_LINK_EMAILS is True. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = False # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# New format
CACHES = {
}
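# A hypothetical example of the new format (placeholder values):
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#     }
# }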
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# The default logging configuration. This sends an email to
# the site admins on every HTTP 500 error. All other log
# records are sent to the bit bucket.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.CallbackFilter',
'callback': lambda r: not DEBUG
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
|
disqus/django-old
|
django/conf/global_settings.py
|
Python
|
bsd-3-clause
| 21,172
|
[
"VisIt"
] |
b697fe703f417262438515faedfe6cfc7f1e54ea1ecd8b10aff48e7f4557ec7e
|
#!/usr/bin/env python
from magpy.stream import *
from magpy.absolutes import *
from magpy.transfer import *
from magpy.database import *
import wx
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
class AnalysisPage(wx.Panel):
#def __init__(self, parent):
# wx.Panel.__init__(self,parent,-1,size=(100,100))
def __init__(self, *args, **kwds):
wx.Panel.__init__(self, *args, **kwds)
self.filterlist = ['flat','barthann','bartlett','blackman','blackmanharris','bohman','boxcar','cosine','flattop','hamming','hann','nuttall','parzen','triang','gaussian','wiener','spline','butterworth']
self.filterlength = ['second','minute','hour','day','month','year','userdefined']
self.comp = ['xyz', 'hdz', 'idf']
self.createControls()
self.doLayout()
# Widgets
def createControls(self):
# TODO Methods:
# filter, derivative, offset, fit, baseline, k_fmi, get_min, get_max, mean, delta_f, rotation, spectrogam, powerspec, smooth
self.head1Label = wx.StaticText(self, label="Basic methods:")
# derivative, fit, rotation
self.head2Label = wx.StaticText(self, label="Get values:")
# mean, max, min
self.head3Label = wx.StaticText(self, label="Manipulation:")
# filter, smooth, offset
self.head4Label = wx.StaticText(self, label="Geomagnetic methods:")
# baseline, k_fmi, delta_f
#self.head5Label = wx.StaticText(self, label="Multiple streams:")
# merge, subtract, stack
# 1 Line
self.derivativeButton = wx.Button(self,-1,"Derivative",size=(160,30))
self.rotationButton = wx.Button(self,-1,"Rotation",size=(160,30))
self.fitButton = wx.Button(self,-1,"Fit",size=(160,30))
# 2 Line
self.meanButton = wx.Button(self,-1,"Mean",size=(160,30))
self.maxButton = wx.Button(self,-1,"Maxima",size=(160,30))
self.minButton = wx.Button(self,-1,"Minima",size=(160,30))
# 3 Line
self.offsetButton = wx.Button(self,-1,"Offsets",size=(160,30))
self.filterButton = wx.Button(self,-1,"Filter",size=(160,30))
self.smoothButton = wx.Button(self,-1,"Smooth",size=(160,30))
# 4 Line
self.activityButton = wx.Button(self,-1,"Activity",size=(160,30))
self.deltafButton = wx.Button(self,-1,"Delta F",size=(160,30))
self.baselineButton = wx.Button(self,-1,"Baseline",size=(160,30))
# 5 Line
#self.mergeButton = wx.Button(self,-1,"Merge",size=(160,30))
#self.subtractButton = wx.Button(self,-1,"Subtract",size=(160,30))
#self.stackButton = wx.Button(self,-1,"Stack/Average",size=(160,30))
# 3. Section
#self.selectfilterLabel = wx.StaticText(self, label="Select type:")
#self.selectfilterComboBox = wx.ComboBox(self, choices=self.filterlist,
# style=wx.CB_DROPDOWN, value=self.filterlist[14])
#self.selectlengthLabel = wx.StaticText(self, label="Select length:")
#self.selectlengthComboBox = wx.ComboBox(self, choices=self.filterlength,
# style=wx.CB_DROPDOWN, value=self.filterlength[0])
def doLayout(self):
# A horizontal BoxSizer will contain the GridSizer (on the left)
# and the logger text control (on the right):
boxSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
# A GridSizer will contain the other controls:
gridSizer = wx.FlexGridSizer(rows=20, cols=2, vgap=10, hgap=10)
# Prepare some reusable arguments for calling sizer.Add():
expandOption = dict(flag=wx.EXPAND)
noOptions = dict()
emptySpace = ((0, 0), noOptions)
"""
# section 3
(self.selectfilterLabel, noOptions),
(self.selectfilterComboBox, expandOption),
(self.selectlengthLabel, noOptions),
(self.selectlengthComboBox, expandOption),
emptySpace,
(self.filterButton, dict(flag=wx.ALIGN_CENTER)),
emptySpace,
emptySpace,
# end
"""
# Add the controls to the sizers:
for control, options in \
[(self.head1Label, noOptions),
emptySpace,
(self.derivativeButton, dict(flag=wx.ALIGN_CENTER)),
(self.fitButton, dict(flag=wx.ALIGN_CENTER)),
(self.rotationButton, dict(flag=wx.ALIGN_CENTER)),
emptySpace,
(self.head2Label, noOptions),
emptySpace,
(self.maxButton, dict(flag=wx.ALIGN_CENTER)),
(self.minButton, dict(flag=wx.ALIGN_CENTER)),
(self.meanButton, dict(flag=wx.ALIGN_CENTER)),
emptySpace,
(self.head3Label, noOptions),
emptySpace,
(self.filterButton, dict(flag=wx.ALIGN_CENTER)),
(self.smoothButton, dict(flag=wx.ALIGN_CENTER)),
(self.offsetButton, dict(flag=wx.ALIGN_CENTER)),
emptySpace,
(self.head4Label, noOptions),
emptySpace,
(self.deltafButton, dict(flag=wx.ALIGN_CENTER)),
(self.baselineButton, dict(flag=wx.ALIGN_CENTER)),
(self.activityButton, dict(flag=wx.ALIGN_CENTER)),
emptySpace]:
gridSizer.Add(control, **options)
for control, options in \
[(gridSizer, dict(border=5, flag=wx.ALL))]:
boxSizer.Add(control, **options)
self.SetSizerAndFit(boxSizer)
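# A minimal, hedged usage sketch (not part of the original module):
# AnalysisPage is a plain wx.Panel subclass, so it can be hosted in any
# frame; the host frame below is hypothetical.
#
# if __name__ == '__main__':
#     app = wx.App(False)
#     frame = wx.Frame(None, title="AnalysisPage demo")
#     page = AnalysisPage(frame)
#     frame.Show()
#     app.MainLoop()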
|
hschovanec-usgs/magpy
|
magpy/gui/analysispage.py
|
Python
|
gpl-3.0
| 5,932
|
[
"Gaussian"
] |
9d7b7c90a252418fe532f28fa9822dc62a8f3ee57c084377bad7433c8bfe3d53
|
# common.py - shared symbols and globals
import os, errno
# exception class so you know where exception came from
class FsDriftException(Exception):
pass
def myassert(bool_expr):
if (not bool_expr):
raise FsDriftException('assertion failed!')
NOTOK = 1
OK = 0
BYTES_PER_KiB = 1 << 10
BYTES_PER_MiB = 1 << 20
KiB_PER_GiB = 1 << 20
MiB_PER_GiB = 1 << 10
USEC_PER_SEC = 1000000
FD_UNDEFINED = -1
class rq:
READ = 0
RANDOM_READ = 1
CREATE = 2
RANDOM_WRITE = 3
APPEND = 4
SOFTLINK = 5
HARDLINK = 6
DELETE = 7
RENAME = 8
TRUNCATE = 9
REMOUNT = 10
READDIR = 11
RANDOM_DISCARD = 12
WRITE = 13
# file size can be either fixed or drawn from an exponential random distribution
class FileSizeDistr:
fixed = 0
exponential = 1
def FileSizeDistr2str(v):
if v == FileSizeDistr.fixed:
return "fixed"
elif v == FileSizeDistr.exponential:
return "exponential"
raise FsDriftException(
'file size distribution must be one of: fixed, exponential')
# files are selected from the population with a uniform
# or Gaussian random distribution.
class FileAccessDistr:
uniform = 2
gaussian = 3
def FileAccessDistr2str(v):
if v == FileAccessDistr.uniform:
return "uniform"
elif v == FileAccessDistr.gaussian:
return "gaussian"
raise FsDriftException(
'file access distribution must be one of: uniform, gaussian')
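# A minimal, hedged sketch of the enum/string round trip above
# (illustration only; these assertions are not used elsewhere):
#
# assert FileSizeDistr2str(FileSizeDistr.exponential) == 'exponential'
# assert FileAccessDistr2str(FileAccessDistr.gaussian) == 'gaussian'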
# instead of looking up before deletion, do reverse, delete and catch exception
def ensure_deleted(file_path):
try:
os.unlink(file_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
# just create an empty file
# leave exception handling to caller
def touch(fn):
open(fn, 'w').close()
# create directory if it's not already there
def ensure_dir_exists(dirpath):
if not os.path.exists(dirpath):
parent_path = os.path.dirname(dirpath)
if parent_path == dirpath:
raise FsDriftException(
'ensure_dir_exists: cannot obtain parent path of non-existent path: ' +
dirpath)
ensure_dir_exists(parent_path)
try:
os.mkdir(dirpath)
except OSError as e:
if e.errno != errno.EEXIST: # workaround for filesystem bug
raise e
else:
if not os.path.isdir(dirpath):
raise FsDriftException('%s already exists and is not a directory!'
% dirpath)
# careful with this one
def deltree(topdir):
if len(topdir) < 6:
raise FsDriftException('are you sure you want to delete %s ?' % topdir)
if not os.path.exists(topdir):
return
if not os.path.isdir(topdir):
return
for (dir, subdirs, files) in os.walk(topdir, topdown=False):
for f in files:
os.unlink(os.path.join(dir, f))
for d in subdirs:
os.rmdir(os.path.join(dir, d))
os.rmdir(topdir)
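# A minimal, hedged usage sketch for the helpers above (the paths are
# hypothetical; illustration only):
#
# ensure_dir_exists('/tmp/fs-drift-demo/a/b')      # creates parents as needed
# touch('/tmp/fs-drift-demo/a/b/marker')           # empty file
# ensure_deleted('/tmp/fs-drift-demo/a/b/marker')  # no error if already gone
# deltree('/tmp/fs-drift-demo')                    # refuses suspiciously short paths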
|
parallel-fs-utils/fs-drift
|
common.py
|
Python
|
apache-2.0
| 2,976
|
[
"Gaussian"
] |
111324dec7cb8b1c8fa8de38ab145d87b5d60967f963109de81797ab8f4f47c1
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import random
from deap import base
from deap import creator
from deap import benchmarks
from deap import tools
IND_SIZE = 10
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
def update(ind, mu, std):
for i, mu_i in enumerate(mu):
ind[i] = random.gauss(mu_i,std)
toolbox = base.Toolbox()
toolbox.register("update", update)
toolbox.register("evaluate", benchmarks.sphere)
def main():
"""Implements the One-Fifth rule algorithm as expressed in :
Kern, S., S.D. Muller, N. Hansen, D. Buche, J. Ocenasek and P. Koumoutsakos (2004).
Learning Probability Distributions in Continuous Evolutionary Algorithms -
A Comparative Review. Natural Computing, 3(1), pp. 77-112.
However instead of parent and offspring the algorithm is expressed in terms of
best and worst. Best is equivalent to the parent, and worst to the offspring.
Instead of producing a new individual each time, we have defined a function which
updates the worst individual using the best one as the mean of the gaussian and
the sigma computed as the standard deviation.
"""
random.seed(64)
logbook = tools.Logbook()
logbook.header = "gen", "fitness"
interval = (-3,7)
mu = (random.uniform(interval[0], interval[1]) for _ in range(IND_SIZE))
sigma = (interval[1] - interval[0])/2.0
alpha = 2.0**(1.0/IND_SIZE)
best = creator.Individual(mu)
best.fitness.values = toolbox.evaluate(best)
worst = creator.Individual((0.0,)*IND_SIZE)
NGEN = 1500
for g in range(NGEN):
toolbox.update(worst, best, sigma)
worst.fitness.values = toolbox.evaluate(worst)
if best.fitness <= worst.fitness:
sigma = sigma * alpha
best, worst = worst, best
else:
sigma = sigma * alpha**(-0.25)
logbook.record(gen=g, fitness=best.fitness.values)
print(logbook.stream)
return best
if __name__ == "__main__":
main()
|
marcioweck/PSSLib
|
reference/deap/examples/es/onefifth.py
|
Python
|
lgpl-3.0
| 2,795
|
[
"Gaussian"
] |
a9fab1d811c0780d85aafdc7b1dc25515b83ff881aaa1197292af3e1adfe9415
|
"""MNIST AutoEncoder Test"""
import numpy as np;
import matplotlib.pyplot as plt;
import theano;
import theano.tensor as T;
import telaugesa.datasets as ds;
from telaugesa.fflayers import ReLULayer;
from telaugesa.fflayers import SigmoidLayer;
from telaugesa.model import AutoEncoder;
from telaugesa.optimize import gd_updates;
from telaugesa.cost import binary_cross_entropy_cost;
n_epochs=100;
batch_size=100;
datasets=ds.load_mnist("../data/mnist.pkl.gz");
train_set_x, train_set_y = datasets[0];
valid_set_x, valid_set_y = datasets[1];
test_set_x, test_set_y = datasets[2];
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_valid_batches=valid_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print "[MESSAGE] The data is loaded"
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
ep_idx=T.lscalar();
corruption_level=T.fscalar();
encode_layer=SigmoidLayer(in_dim=784,
out_dim=500);
decode_layer=SigmoidLayer(in_dim=500,
out_dim=784);
model=AutoEncoder(layers=[encode_layer, decode_layer]);
#out=model.fprop(X, corruption_level=corruption_level, noise_type="gaussian");
out=model.fprop(X, corruption_level=corruption_level);
cost=binary_cross_entropy_cost(out[-1], X);
updates=gd_updates(cost=cost, params=model.params, method="sgd", learning_rate=0.1);
train=theano.function(inputs=[idx, corruption_level],
outputs=[cost],
updates=updates,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]});
print "[MESSAGE] The model is built"
epoch = 0;
min_cost=None;
corr=np.random.uniform(low=0.2, high=0.3, size=1).astype("float32");
corr_best=corr[0]
while (epoch < n_epochs):
epoch = epoch + 1;
c = []
#co= [];
#corr=np.random.rand(1).astype("float32");
for batch_index in xrange(n_train_batches):
train_cost=train(batch_index, corr_best)
c.append(train_cost);
#co.append(curr_corr);
    if min_cost is None:
min_cost=np.mean(c);
else:
if (np.mean(c)<min_cost):
min_cost=np.mean(c);
corr_best=corr[0]
corr=np.random.uniform(low=corr_best, high=corr_best+0.15, size=1).astype("float32");
else:
corr=np.random.uniform(low=corr_best, high=corr_best+0.15, size=1).astype("float32");
print 'Training epoch %d, cost ' % epoch, np.mean(c), corr_best;
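# Hedged note (not in the original script): the loop above is a crude random
# search over the corruption level -- whenever the mean epoch cost improves,
# corr_best jumps to the sampled value, and the next candidate is drawn
# uniformly from [corr_best, corr_best + 0.15].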
filters=model.layers[0].W.get_value(borrow=True);
for i in xrange(500):
    plt.subplot(25, 20, i + 1);  # subplot indices are 1-based, so offset i
plt.imshow(np.reshape(filters[:,i], (28, 28)), cmap = plt.get_cmap('gray'), interpolation='nearest');
plt.axis('off')
plt.show();
|
duguyue100/telaugesa
|
scripts/mnist_ae_test.py
|
Python
|
mit
| 2,845
|
[
"Gaussian"
] |
904bfd55b94387d545b93b08d0f9146b2658b8444a1e817e6e5e8d60570a3123
|
import numpy as np
from parabem.airfoil.conformal_mapping import VanDeVoorenAirfoil
from parabem.vtk_export import VtkWriter
from parabem.utils import check_path
####################################################
# analytic solution of vandevooren-airfoils #
####################################################
# - input parameters for the Van de Vooren airfoil
alpha = np.deg2rad(10) # alpha is the angle of attack in rad
tau = np.deg2rad(1) # tau is the angle of the trailing edge
epsilon = 0.10 # epsilon is the thickness-parameter
num_x = 300 # number of plot-points in x-direction
num_y = 300 # number of plot-points in y-direction
# - create the Van de Vooren airfoil object
airfoil = VanDeVoorenAirfoil(tau=tau, epsilon=epsilon)
# -helper functions
def complex_to_3vec(z):
return [z.real, z.imag, 0]
def zeta_velocity(z):
vel = airfoil.velocity(z, alpha)
return [vel.real, -vel.imag, 0.]
def z_velocity(z):
vel = airfoil.z_velocity(z, alpha)
return [vel.real, -vel.imag, 0.]
def potential(z):
pot = airfoil.potential(z, alpha)
return pot.real
def stream(z):
stream = airfoil.potential(z, alpha)
return stream.imag
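# Hedged note (not in the original example): the velocity helpers above
# return (vel.real, -vel.imag) because the derivative of a complex potential
# is the conjugate velocity, w(z) = dPhi/dz = u - i*v, so negating the
# imaginary part recovers the physical v component.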
# complex z-plane with circle
# ----------------------------------------------------------
z_range_x = np.linspace(-3, 3, num_x)
z_range_y = np.linspace(-3, 3, num_y)
z_grid = [x + 1j * y for y in z_range_y for x in z_range_x]
# remove all points lying inside the circle with radius r
r = airfoil.radius
z_grid = [z for z in z_grid if abs(z) > r]
# ----------------------------------------------------------
# complex zeta-plane with the mapped circle (= Van de Vooren airfoil)
# ----------------------------------------------------------
z_to_zeta = list(map(airfoil.zeta, z_grid))
zeta_vel = list(map(zeta_velocity, z_grid))
zeta_pot = list(map(potential, z_grid))
zeta_stream = list(map(stream, z_grid))
airfoil = list(map(complex_to_3vec, airfoil.coordinates()))
# ----------------------------------------------------------
with open(check_path("results/conformal_mapping/vandevooren_zeta.vtk"), "w") as _file:
writer = VtkWriter()
writer.structed_grid(_file, "zeta_plane", [num_x, num_y, 1])
writer.points(_file, list(map(complex_to_3vec, z_to_zeta)))
writer.data(_file, zeta_stream, name="stream", _type="SCALARS", data_type="POINT_DATA")
writer.data(_file, zeta_pot, name="pot", _type="SCALARS", data_type="POINT_DATA")
writer.data(_file, zeta_vel, name="velocity", _type="VECTORS", data_type="POINT_DATA")
with open(check_path("results/conformal_mapping/vandevooren_airfoil.vtk"), "w") as _file:
writer = VtkWriter()
writer.unstructed_grid(_file, "airfoil")
writer.points(_file, airfoil)
writer.lines(_file, [range(len(airfoil))])
|
booya-at/paraBEM
|
examples/vtk/vtk_airfoil_vandevooren.py
|
Python
|
gpl-3.0
| 2,780
|
[
"VTK"
] |
b7c046e3527d7f73daa6ef81fd996839e3fb700c07c340999c588cce356af3ec
|
from __future__ import division, print_function, absolute_import
import unittest
import GPy
import numpy as np
import networkx as nx
from numpy.testing import *
from .utilities import *
from safemdp.SafeMDP_class import reachable_set, returnable_set
from safemdp.grid_world import compute_true_safe_set, grid_world_graph
from .SafeMDP_class import link_graph_and_safe_set
class DifferenceKernelTest(unittest.TestCase):
@staticmethod
def _check(gp, x1, x2):
"""Compare the gp difference predictions on X1 and X2.
Parameters
----------
gp: GPy.core.GP
x1: np.array
x2: np.array
"""
n = x1.shape[0]
# Difference prediction with library
a = np.hstack((np.eye(n), -np.eye(n)))
m1, v1 = gp.predict_noiseless(np.vstack((x1, x2)), full_cov=True)
m1 = a.dot(m1)
v1 = np.linalg.multi_dot((a, v1, a.T))
# Predict diagonal
m2, v2 = gp.predict_noiseless(np.hstack((x1, x2)),
kern=DifferenceKernel(gp.kern),
full_cov=False)
assert_allclose(m1, m2)
assert_allclose(np.diag(v1), v2.squeeze())
# Predict full covariance
m2, v2 = gp.predict_noiseless(np.hstack((x1, x2)),
kern=DifferenceKernel(gp.kern),
full_cov=True)
assert_allclose(m1, m2)
assert_allclose(v1, v2, atol=1e-12)
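    # Hedged note (not in the original tests): _check relies on the fact
    # that a linear map of a jointly Gaussian vector is Gaussian. With
    # A = [I, -I], the difference f(x1) - f(x2) has mean A m and covariance
    # A V A.T, i.e. m(x1) - m(x2) and V11 + V22 - V12 - V21, which is what
    # the DifferenceKernel prediction must reproduce.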
def test_1d(self):
"""Test the difference kernel for a 1D input."""
# Create some GP model
kernel = GPy.kern.RBF(input_dim=1, lengthscale=0.05)
likelihood = GPy.likelihoods.Gaussian(variance=0.005 ** 2)
x = np.linspace(0, 1, 5)[:, None]
y = x ** 2
gp = GPy.core.GP(x, y, kernel, likelihood)
# Create test points
n = 10
x1 = np.linspace(0, 1, n)[:, None]
x2 = x1 + np.linspace(0, 0.1, n)[::-1, None]
self._check(gp, x1, x2)
def test_2d(self):
"""Test the difference kernel for a 2D input."""
# Create some GP model
kernel = GPy.kern.RBF(input_dim=2, lengthscale=0.05)
likelihood = GPy.likelihoods.Gaussian(variance=0.005 ** 2)
x = np.hstack((np.linspace(0, 1, 5)[:, None],
np.linspace(0.5, 1.5, 5)[:, None]))
y = x[:, [0]] ** 2 + x[:, [1]] ** 2
gp = GPy.core.GP(x, y, kernel, likelihood)
# Create test points
n = 10
x1 = np.hstack((np.linspace(0, 1, n)[:, None],
np.linspace(0.5, 1.5, n)[:, None]))
x2 = x1 + np.hstack((np.linspace(0, 0.1, n)[::-1, None],
np.linspace(0., 0.1, n)[::-1, None]))
self._check(gp, x1, x2)
class MaxOutDegreeTest(unittest.TestCase):
def test_all(self):
"""Test the max_out_degree function."""
graph = nx.DiGraph()
graph.add_edges_from(((0, 1),
(1, 2),
(2, 3),
(3, 1)))
        assert_equal(max_out_degree(graph), 1)
        graph.add_edge(0, 2)
        assert_equal(max_out_degree(graph), 2)
        graph.add_edge(2, 3)  # duplicate edge, out-degree unchanged
        assert_equal(max_out_degree(graph), 2)
        graph.add_edge(3, 2)
        assert_equal(max_out_degree(graph), 2)
        graph.add_edge(3, 0)  # third distinct successor for node 3
        assert_equal(max_out_degree(graph), 3)
class ReachableSetTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ReachableSetTest, self).__init__(*args, **kwargs)
# 3
# ^
# |
# 0 --> 1 --> 2 --> 0
# ^
# |
# 4
self.graph = nx.DiGraph()
self.graph.add_edges_from([(0, 1),
(1, 2),
(2, 0),
(4, 1)], action=1)
self.graph.add_edge(2, 3, action=2)
self.safe_set = np.ones((self.graph.number_of_nodes(),
max_out_degree(self.graph) + 1),
dtype=np.bool)
link_graph_and_safe_set(self.graph, self.safe_set)
self.true = np.zeros(self.safe_set.shape[0], dtype=np.bool)
def setUp(self):
self.safe_set[:] = True
def _check(self):
reach = reachable_set(self.graph, [0])
assert_equal(reach[:, 0], self.true)
def test_all_safe(self):
"""Test reachable set if everything is safe"""
self.true[:] = [1, 1, 1, 1, 0]
self._check()
def test_unsafe1(self):
"""Test safety aspect"""
self.safe_set[1, 1] = False
self.true[:] = [1, 1, 0, 0, 0]
self._check()
def test_unsafe2(self):
"""Test safety aspect"""
self.safe_set[2, 2] = False
self.true[:] = [1, 1, 1, 0, 0]
self._check()
def test_unsafe3(self):
"""Test safety aspect"""
self.safe_set[2, 1] = False
self.true[:] = [1, 1, 1, 1, 0]
self._check()
def test_unsafe4(self):
"""Test safety aspect"""
self.safe_set[4, 1] = False
self.true[:] = [1, 1, 1, 1, 0]
self._check()
def test_out(self):
"""Test writing the output"""
self.safe_set[2, 2] = False
self.true[:] = [1, 1, 1, 0, 0]
out = np.zeros_like(self.safe_set)
reachable_set(self.graph, [0], out=out)
assert_equal(out[:, 0], self.true)
def test_error(self):
"""Check error condition"""
with assert_raises(AttributeError):
reachable_set(self.graph, [])
class ReturnableSetTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ReturnableSetTest, self).__init__(*args, **kwargs)
# 3
# ^
# |
# 0 --> 1 --> 2 --> 0
# ^
# |
# 4
self.graph = nx.DiGraph()
self.graph.add_edges_from([(0, 1),
(1, 2),
(2, 0),
(4, 1)], action=1)
self.graph.add_edge(2, 3, action=2)
self.graph_rev = self.graph.reverse()
self.safe_set = np.ones((self.graph.number_of_nodes(),
max_out_degree(self.graph) + 1),
dtype=np.bool)
link_graph_and_safe_set(self.graph, self.safe_set)
self.true = np.zeros(self.safe_set.shape[0], dtype=np.bool)
def setUp(self):
self.safe_set[:] = True
def _check(self):
ret = returnable_set(self.graph, self.graph_rev, [0])
assert_equal(ret[:, 0], self.true)
def test_all_safe(self):
"""Test reachable set if everything is safe"""
self.true[:] = [1, 1, 1, 0, 1]
self._check()
def test_unsafe1(self):
"""Test safety aspect"""
self.safe_set[1, 1] = False
self.true[:] = [1, 0, 1, 0, 0]
self._check()
def test_unsafe2(self):
"""Test safety aspect"""
self.safe_set[2, 1] = False
self.true[:] = [1, 0, 0, 0, 0]
self._check()
def test_unsafe3(self):
"""Test safety aspect"""
self.safe_set[2, 2] = False
self.true[:] = [1, 1, 1, 0, 1]
self._check()
def test_unsafe4(self):
"""Test safety aspect"""
self.safe_set[4, 1] = False
self.true[:] = [1, 1, 1, 0, 0]
self._check()
def test_out(self):
"""Test writing the output"""
self.safe_set[1, 1] = False
self.true[:] = [1, 0, 1, 0, 0]
out = np.zeros_like(self.safe_set)
returnable_set(self.graph, self.graph_rev, [0], out=out)
assert_equal(out[:, 0], self.true)
def test_error(self):
"""Check error condition"""
with assert_raises(AttributeError):
reachable_set(self.graph, [])
class GridWorldGraphTest(unittest.TestCase):
"""Test the grid_world_graph function."""
def test(self):
"""Simple test"""
# 1 2 3
# 4 5 6
graph = grid_world_graph((2, 3))
graph_true = nx.DiGraph()
graph_true.add_edges_from(((1, 2),
(2, 3),
(4, 5),
(5, 6)),
action=1)
graph_true.add_edges_from(((1, 4),
(2, 5),
(3, 6)),
action=2)
graph_true.add_edges_from(((2, 1),
(3, 2),
(5, 4),
(6, 5)),
action=3)
graph_true.add_edges_from(((4, 1),
(5, 2),
(6, 3)),
action=4)
assert_(nx.is_isomorphic(graph, graph_true))
class TestTrueSafeSet(unittest.TestCase):
def test_differences_safe(self):
altitudes = np.array([[1, 2, 3],
[2, 3, 4]])
safe = compute_true_safe_set((2, 3), altitudes.reshape(-1), -1)
true_safe = np.array([[1, 1, 1, 1, 1, 1],
[1, 1, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1],
[0, 0, 0, 1, 1, 1]],
dtype=np.bool).T
assert_equal(safe, true_safe)
def test_differences_unsafe(self):
altitudes = np.array([[1, 0, 3],
[2, 3, 0]])
safe = compute_true_safe_set((2, 3), altitudes.reshape(-1), -1)
true_safe = np.array([[1, 1, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 0],
[1, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0]],
dtype=np.bool).T
assert_equal(safe, true_safe)
if __name__ == '__main__':
unittest.main()
|
befelix/SafeMDP
|
safemdp/test.py
|
Python
|
mit
| 10,232
|
[
"Gaussian"
] |
b45b3288991e28cffcbb781ddeccac52fae219eb289b170602cfa734e6f692c0
|
# to do
# handle files that don't exist in all 3 directories
import os
import subprocess
import shutil
import hashlib
import log
from uvsconst import UVSConst
# TODO: return results from this service
# shouldn't have the trouble-found flag set. hard conflict found is ok; refactor trouble found to raise an error
# this is an un-keyed hash, it is not stored anywhere in a uvs repo, it's just for temporarily comparing 2 files
# to see if they are the same or not. we could also just read them all and do a byte-for-byte compare.
def _get_file_hash(filepath):
""" Return a str with the hex representation of the hash of the file with the given filepath.
(assuming the file does exist and is readable. exception is raised otherwise)
"""
assert isinstance(filepath, str) or isinstance(filepath, unicode)
assert os.path.isfile(filepath)
srcfile = open(filepath, 'rb')
hash_func = hashlib.sha512()
buf = srcfile.read(UVSConst.DISK_IO_READ_SIZE_RECOMMENDATION)
while len(buf) > 0:
hash_func.update(buf)
buf = srcfile.read(UVSConst.DISK_IO_READ_SIZE_RECOMMENDATION)
hex_digest = hash_func.hexdigest()
log.amsv("computed sha512sum for file: " + str(filepath))
log.amsv("sha512: " + str(hex_digest))
return hex_digest
def _are_two_files_identical(file1_pathname, file2_pathname):
"""
Given pathnames for two files, compare their content and return True if the two files contain the
exact same bit patterns, False otherwise.
does not compare file names, creation date, timestamps, etc etc. just file contents
"""
assert isinstance(file1_pathname, str) or isinstance(file1_pathname, unicode)
assert isinstance(file2_pathname, str) or isinstance(file2_pathname, unicode)
assert file1_pathname != file2_pathname
assert os.path.isfile(file1_pathname)
assert os.path.isfile(file2_pathname)
log.amsv("_are_two_files_identical() called with args: ")
log.amsv("f1: " + str(file1_pathname))
log.amsv("f2: " + str(file2_pathname))
f1_hash = _get_file_hash(filepath=file1_pathname)
f2_hash = _get_file_hash(filepath=file2_pathname)
return f1_hash == f2_hash
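# Hedged note (not in the original module): comparing two SHA-512 digests
# stands in for a byte-for-byte comparison; an accidental collision is
# astronomically unlikely, so equal digests are treated as identical files.
# Illustration only (hypothetical paths):
#
# same = _are_two_files_identical('/tmp/a.txt', '/tmp/b.txt')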
def gui_merge3(base_dirpath, a_dirpath, b_dirpath, out_dirpath):
assert isinstance(base_dirpath, str) or isinstance(base_dirpath, unicode)
assert isinstance(a_dirpath, str) or isinstance(a_dirpath, unicode)
assert isinstance(b_dirpath, str) or isinstance(b_dirpath, unicode)
assert isinstance(out_dirpath, str) or isinstance(out_dirpath, unicode)
assert out_dirpath != base_dirpath
assert out_dirpath != a_dirpath
assert out_dirpath != b_dirpath
assert os.path.isdir(base_dirpath)
assert os.path.isdir(a_dirpath)
assert os.path.isdir(b_dirpath)
assert os.path.isdir(out_dirpath)
# kdiff3 takes the merge base as first arg (or specify it directly with -b or --base option)
# kdiff3 options:
# -m or --merge
# -o or --output (output path file or dir, this options implies -m)
diff3_cmd = "kdiff3 " + base_dirpath + " " + a_dirpath + " " + b_dirpath + " -m -o " + out_dirpath
log.amsv("kdiff3 cmd is: " + diff3_cmd)
# call will return the exit code.
cmd_exit_code = subprocess.call(diff3_cmd, shell=True)
if 0 != cmd_exit_code:
log.ams("kdiff3 exit code: " + str(cmd_exit_code))
# TODO
# make a function that does auto merge3 for just files in a directory
# handle cases where file is not in all 3.
# recursively call this
# TODO validate path names better
# if a path name contains space this may break, we need some os or other module to pre-process path names
def auto_merge3(base_dirpath, a_dirpath, b_dirpath, out_dirpath):
    # TODO: maybe we allow None to be supplied as an argument, and None would indicate an empty
    # folder (the folder doesn't exist, but treat it as empty, so if None replace listdir() with an empty list).
    # output can not be None though. keep that assertion
assert isinstance(base_dirpath, str) or isinstance(base_dirpath, unicode)
assert isinstance(a_dirpath, str) or isinstance(a_dirpath, unicode)
assert isinstance(b_dirpath, str) or isinstance(b_dirpath, unicode)
assert isinstance(out_dirpath, str) or isinstance(out_dirpath, unicode)
assert out_dirpath != base_dirpath
assert out_dirpath != a_dirpath
assert out_dirpath != b_dirpath
assert out_dirpath is not None
if not os.path.isdir(out_dirpath):
os.makedirs(out_dirpath)
# we do this
# call merge3_files on this level update result
# find common subdirs. for each common subdir call self with joined pathnames
# combine results and return them
results = {}
# # these need manual resolution
results['hard_conflicts_found'] = False
# # these mean something bad happened, i.e. bin files, permission denied ...
results['trouble_found'] = False
current_level_results = merge3_all_files(base_dirpath=base_dirpath, a_dirpath=a_dirpath, b_dirpath=b_dirpath,
out_dirpath=out_dirpath)
if current_level_results['hard_conflicts_found']:
results['hard_conflicts_found'] = True
if current_level_results['trouble_found']:
results['trouble_found'] = True
# done with this.
del current_level_results
log.ams("", label=False)
log.ams("------------------------------------------------------------------------------")
log.ams("----------------------------- Done merging current level, recursing. ")
base_members = []
if (base_dirpath is not None) and os.path.isdir(base_dirpath):
base_members = os.listdir(base_dirpath)
base_subdirnames = [member for member in base_members if os.path.isdir(os.path.join(base_dirpath, member))]
base_members.sort()
base_subdirnames.sort()
a_members = []
if (a_dirpath is not None) and os.path.isdir(a_dirpath):
a_members = os.listdir(a_dirpath)
a_subdirnames = [member for member in a_members if os.path.isdir(os.path.join(a_dirpath, member))]
a_members.sort()
a_subdirnames.sort()
b_members = []
if (b_dirpath is not None) and os.path.isdir(b_dirpath):
b_members = os.listdir(b_dirpath)
b_subdirnames = [member for member in b_members if os.path.isdir(os.path.join(b_dirpath, member))]
b_members.sort()
b_subdirnames.sort()
log.ams("base_members: " + str(base_members))
log.ams("base_subdirnames: " + str(base_subdirnames))
log.ams("a_members: " + str(a_members))
log.ams("a_subdirnames: " + str(a_subdirnames))
log.ams("b_members: " + str(b_members))
log.ams("b_subdirnames: " + str(b_subdirnames))
subdirs = set()
for subdir in a_subdirnames:
# if (subdir in base_subdirnames) and (subdir in b_subdirnames): pass
# if (subdir in b_subdirnames): pass
subdirs.add(subdir)
for subdir in b_subdirnames:
subdirs.add(subdir)
log.ams("about to visit subdirs: " + str(subdirs))
for subdir in subdirs:
log.ams("handling subdir: " + str(subdir))
base_subdir_path = os.path.join(base_dirpath, subdir)
a_subdir_path = os.path.join(a_dirpath, subdir)
b_subdir_path = os.path.join(b_dirpath, subdir)
out_subdir_path = os.path.join(out_dirpath, subdir)
# make the directory under merge results
if not os.path.isdir(out_subdir_path):
os.makedirs(out_subdir_path)
# temp_results = ....
temp_results = auto_merge3(base_dirpath=base_subdir_path, a_dirpath=a_subdir_path, b_dirpath=b_subdir_path,
out_dirpath=out_subdir_path)
if temp_results['hard_conflicts_found']:
results['hard_conflicts_found'] = True
if temp_results['trouble_found']:
results['trouble_found'] = True
return results
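# Hedged note (not in the original module): both auto_merge3() and
# merge3_all_files() return a dict of the form
#     {'hard_conflicts_found': bool, 'trouble_found': bool}
# where hard conflicts require manual resolution and trouble means something
# bad happened along the way (IOError, binary files, diff3 failure, ...).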
# just files, no looking into recursive subdirs.
def merge3_all_files(base_dirpath, a_dirpath, b_dirpath, out_dirpath):
    # TODO: maybe we allow None to be supplied as an argument, and None would indicate an empty
    # folder (the folder doesn't exist, but treat it as empty, so if None replace listdir() with an empty list).
    # output can not be None though. keep that assertion
assert isinstance(base_dirpath, str) or isinstance(base_dirpath, unicode)
assert isinstance(a_dirpath, str) or isinstance(a_dirpath, unicode)
assert isinstance(b_dirpath, str) or isinstance(b_dirpath, unicode)
assert isinstance(out_dirpath, str) or isinstance(out_dirpath, unicode)
assert out_dirpath != base_dirpath
assert out_dirpath != a_dirpath
assert out_dirpath != b_dirpath
assert out_dirpath is not None
if not os.path.isdir(out_dirpath):
os.makedirs(out_dirpath)
log.ams("", label=False)
log.ams("------------------------------------------------------------------------------")
log.ams("------------------------------------------------------------------------------")
log.ams("------------------------------------------------------------------------------")
log.ams("----------------------------------------------------------- merge3_all_files()")
log.ams("----- base_dirpath: " + str(base_dirpath))
log.ams("----- a_dirpath: " + str(a_dirpath))
log.ams("----- b_dirpath: " + str(b_dirpath))
log.ams("----- out_dirpath: " + str(out_dirpath))
log.ams("", label=False)
res = {}
# these need manual resolution
res['hard_conflicts_found'] = False
# these mean something bad happened, i.e. bin files, permission denied ...
res['trouble_found'] = False
base_members = []
if (base_dirpath is not None) and os.path.isdir(base_dirpath):
base_members = os.listdir(base_dirpath)
base_filenames = [member for member in base_members if os.path.isfile(os.path.join(base_dirpath, member))]
base_members.sort()
base_filenames.sort()
a_members = []
if (a_dirpath is not None) and os.path.isdir(a_dirpath):
a_members = os.listdir(a_dirpath)
a_filenames = [member for member in a_members if os.path.isfile(os.path.join(a_dirpath, member))]
a_members.sort()
a_filenames.sort()
b_members = []
if (b_dirpath is not None) and os.path.isdir(b_dirpath):
b_members = os.listdir(b_dirpath)
b_filenames = [member for member in b_members if os.path.isfile(os.path.join(b_dirpath, member))]
b_members.sort()
b_filenames.sort()
log.ams("base_members: " + str(base_members))
log.ams("base_filenames: " + str(base_filenames))
log.ams("a_members: " + str(a_members))
log.ams("a_filenames: " + str(a_filenames))
log.ams("b_members: " + str(b_members))
log.ams("b_filenames: " + str(b_filenames))
log.ams("---------------------------------------------------------------------------------------------")
# There are 3 main Cases
# 1- a file is in just 1 dir
# 2- a file is in 2 dirs
# 3- a file is in all three dirs
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# -------------------------------------------- case 1: filenames that exist in only 1 directory.
# case 1 subcases:
# subcase 1: file is in base
# subcase 2: file is in A.
# subcase 3: file is in B.
log.ams("", label=False)
log.ams("CASE 1, files that exist in only 1 out 3 directory.")
# -------
# subcase 1: file is in base
# ignore the file, both branches have deleted it.
log.ams("ignoring case1 subcase1: files that exist only in base. nothing to be done on these.")
# -------
# subcase 2: file is in A.
# branch A has added a file that did not exist in Base or other branch, merge3 should copy it over.
only_in_a_filenames = set()
for candidate_filename in a_filenames:
if (candidate_filename not in base_filenames) and (candidate_filename not in b_filenames):
only_in_a_filenames.add(candidate_filename)
log.ams("files found only in a: " + str(only_in_a_filenames))
# now copy these files over
for only_in_a_filename in only_in_a_filenames:
log.ams("case1 subcase2: file existed only in A, filename: " + str(only_in_a_filename))
only_in_a_filepath = os.path.join(a_dirpath, only_in_a_filename)
out_filepath = os.path.join(out_dirpath, only_in_a_filename)
try:
# shutil.copyfile(src='/da31das2/3dfgasf3hjj/a4312dadssdds', dst=out_filepath)
shutil.copyfile(src=only_in_a_filepath, dst=out_filepath)
except IOError as e:
log.ams("IOError occurred.")
log.ams("repr(e): " + repr(e))
res['trouble_found'] = True
# -------
# subcase 3: file is in B.
only_in_b_filenames = set()
for candidate_filename in b_filenames:
if (candidate_filename not in base_filenames) and (candidate_filename not in a_filenames):
only_in_b_filenames.add(candidate_filename)
log.ams("files found only in b: " + str(only_in_b_filenames))
# now copy these files over
for only_in_b_filename in only_in_b_filenames:
log.ams("case1 subcase3: file existed only in B, filename: " + str(only_in_b_filename))
only_in_b_filepath = os.path.join(b_dirpath, only_in_b_filename)
out_filepath = os.path.join(out_dirpath, only_in_b_filename)
try:
# shutil.copyfile(src='/da31das2/3dfgasf3hjj/a4312dadssdds', dst=out_filepath)
shutil.copyfile(src=only_in_b_filepath, dst=out_filepath)
except IOError as e:
log.ams("IOError occurred.")
log.ams("repr(e): " + repr(e))
res['trouble_found'] = True
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# -------------------------------------------- case 2: filenames that exist in 2 out of 3 dirs.
# case 2 subcases:
#
# subcase 1: file is in base and A.
# subcase 2: file is in base and B.
# subcase 3: file is in A and B.
#
    # TODO: subcases 1 and 2 are somewhat complicated situations. maybe look at what git is doing
# we are not sure if we should let deletions propagate or look at changes versus base.
# we have two options
# option 1: copy over the files. one branch has kept it (either unmodified or modified)
# option 2: copy over if one branch has kept but modified it, otherwise delete it. (kind of like merge3 with lines)
log.ams("", label=False)
log.ams("CASE 2, files that exist in 2 out 3 directory.")
# subcase 1: file is in base and A.
base_and_a_only_filenames = set()
for candidate_filename in a_filenames:
if (candidate_filename in base_filenames) and (candidate_filename not in b_filenames):
base_and_a_only_filenames.add(candidate_filename)
log.ams("files found in base and a only: " + str(base_and_a_only_filenames))
# now deal with subcase1
for base_and_a_only_filename in base_and_a_only_filenames:
log.ams("case2 subcase1: file existed only in Base and A, filename: " + str(base_and_a_only_filename))
base_and_a_only_filepath = os.path.join(a_dirpath, base_and_a_only_filename)
out_filepath = os.path.join(out_dirpath, base_and_a_only_filename)
try:
shutil.copyfile(src=base_and_a_only_filepath, dst=out_filepath)
except IOError as e:
log.ams("IOError occurred.")
log.ams("repr(e): " + repr(e))
res['trouble_found'] = True
# subcase 2: file is in base and B.
base_and_b_only_filenames = set()
for candidate_filename in b_filenames:
if (candidate_filename in base_filenames) and (candidate_filename not in a_filenames):
base_and_b_only_filenames.add(candidate_filename)
log.ams("files found in base and b only: " + str(base_and_b_only_filenames))
# now deal with subcase2
for base_and_b_only_filename in base_and_b_only_filenames:
log.ams("case2 subcase2: file existed only in Base and B, filename: " + str(base_and_b_only_filename))
base_and_b_only_filepath = os.path.join(b_dirpath, base_and_b_only_filename)
out_filepath = os.path.join(out_dirpath, base_and_b_only_filename)
try:
shutil.copyfile(src=base_and_b_only_filepath, dst=out_filepath)
except IOError as e:
log.ams("IOError occurred.")
log.ams("repr(e): " + repr(e))
res['trouble_found'] = True
# subcase 3: file is in A and B.
    # in this subcase, we don't have a base for the merge so no merge3 is possible; we can just
    # create a new file, put the markers down for one big merge conflict and copy a and b into it.
    # this is what diff3 merge would do, if we provided an empty file for the base.
a_and_b_only_filenames = set()
for candidate_filename in a_filenames:
if (candidate_filename in b_filenames) and (candidate_filename not in base_filenames):
a_and_b_only_filenames.add(candidate_filename)
log.ams("files found in a and b only: " + str(a_and_b_only_filenames))
for a_and_b_only_filename in a_and_b_only_filenames:
log.ams("case2 subcase3: file existed only in A and B, filename: " + str(a_and_b_only_filename))
try:
filepath_thru_a = os.path.join(a_dirpath, a_and_b_only_filename)
filepath_thru_b = os.path.join(b_dirpath, a_and_b_only_filename)
out_filepath = os.path.join(out_dirpath, a_and_b_only_filename)
if _are_two_files_identical(file1_pathname=filepath_thru_a, file2_pathname=filepath_thru_b):
log.ams("both branches introduced an identical file, not present in merge base.")
shutil.copyfile(src=filepath_thru_a, dst=out_filepath)
else:
log.ams("the two branches introduced different file with same name, not present in merge base.")
                # in this case we don't have a base for merge3; even diff3 would do nothing but show a big merge
                # conflict, so let's manually do that.
# print UVSConst.MERGE3_CONFLICT_DELIMITER_START
# open the file for writing. write in binary mode and add the new line char manually
# i hate the stupid CRLF sequence on windows, just add a \n for newline, all self-respecting
# editors on windows can handle it.
# TODO: if we ever wanted to have a global setting that allows user to say checkout files
# with windows line ending edit this chunk to support that.
a_filehandle = open(filepath_thru_a, 'rb')
b_filehandle = open(filepath_thru_b, 'rb')
outfile_handle = open(out_filepath, 'wb')
# write the start delimiter for big conflict
outfile_handle.write(str(UVSConst.MERGE3_CONFLICT_DELIMITER_START) + str(filepath_thru_a) + '\n')
# copy over contents of the version from a
# read it chunk by chunk and copy over to out file.
temp_buf = a_filehandle.read(UVSConst.DISK_IO_READ_SIZE_RECOMMENDATION)
while len(temp_buf) > 0:
outfile_handle.write(temp_buf)
temp_buf = a_filehandle.read(UVSConst.DISK_IO_READ_SIZE_RECOMMENDATION)
# now common ancestor delimiters, remember CA does not exist in this case
outfile_handle.write(str(UVSConst.MERGE3_CONFLICT_DELIMITER_MIDDLE_1) +
str("No common ancestor found for this file.") + '\n')
outfile_handle.write(str(UVSConst.MERGE3_CONFLICT_DELIMITER_MIDDLE_2) + '\n')
# copy over contents of the version from b
# read it chunk by chunk and copy over to out file.
temp_buf = b_filehandle.read(UVSConst.DISK_IO_READ_SIZE_RECOMMENDATION)
while len(temp_buf) > 0:
outfile_handle.write(temp_buf)
temp_buf = b_filehandle.read(UVSConst.DISK_IO_READ_SIZE_RECOMMENDATION)
outfile_handle.write(str(UVSConst.MERGE3_CONFLICT_DELIMITER_END) + str(filepath_thru_b) + '\n')
# this case needs manual resolution
# remember to set this, since we set conflict markers to be resolved by the user,
res['hard_conflicts_found'] = True
except IOError as e:
log.ams("IOError occurred.")
log.ams("repr(e): " + repr(e))
res['trouble_found'] = True
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# -------------------------------------------- case 3: filenames in common in all 3 dirs of the 3 way merge.
log.ams("", label=False)
log.ams("CASE 3, files that exist in all 3 out 3 directory.")
common = set()
for filename in a_filenames:
if (filename in base_filenames) and (filename in b_filenames):
common.add(filename)
log.ams("common files: " + str(common))
# launch diff3 external program to do the 3 way merging.
for filename in common:
ca_filepath = str(os.path.join(base_dirpath, filename))
s1_filepath = str(os.path.join(a_dirpath, filename))
s2_filepath = str(os.path.join(b_dirpath, filename))
out_filepath = str(os.path.join(out_dirpath, filename))
diff3_cmd = "diff3 " + s1_filepath + " " + ca_filepath + " " + s2_filepath + " -m > " + out_filepath
log.ams("diff3_cmd is: " + diff3_cmd)
# call will return the exit code.
# blackhole = open(os.devnull, 'wb')
diff3_cmd_exit_code = subprocess.call(diff3_cmd, shell=True)
# diff3 exit code 0 means no conflicts found or conflicts were auto resolved
# diff3 exit code 1 means conflicts that require manual resolution
# diff3 exit code 2 means trouble (bin files, no permissions, ....)
if 1 == diff3_cmd_exit_code:
res['hard_conflicts_found'] = True
elif 2 == diff3_cmd_exit_code:
res['trouble_found'] = True
# Done return result
return res
# TODO add some unit tests for this module.
if '__main__' == __name__:
# res = auto_merge3(base_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/ca',
# a_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/master',
# b_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/mybr',
# out_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/merge_result' )
# res = gui_merge3(base_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/ca',
# a_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/master',
# b_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/mybr',
# out_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/merge_result' )
# m3 = gui_merge3
m3 = auto_merge3
# res = m3(base_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/ca',
# a_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/master',
# b_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/mybr',
# out_dirpath='/home/lu/Desktop/merge_test/.uvs_temp/merge_result' )
res = m3(base_dirpath='/home/lu/m3_test/ca',
a_dirpath='/home/lu/m3_test/br1',
b_dirpath='/home/lu/m3_test/br2',
out_dirpath='/home/lu/m3_test/merge_res',)
print res
#
# print "test file compare:"
# print _are_two_files_identical('/home/lu/m3_test/br1/in_all', '/home/lu/m3_test/br2/in_all')
# print "test file compare:"
# print _are_two_files_identical('/home/lu/m3_test/br1/br1_and_br2', '/home/lu/m3_test/br2/br1_and_br2')
|
kouritron/uvs
|
libuvs/automergeservice.py
|
Python
|
bsd-3-clause
| 25,097
|
[
"VisIt"
] |
0a6489b6a7848feb92bc91459316a951592b612f6846141f58b2b6200cdd6cd9
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
#
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import datetime as dt
class Lammps(CMakePackage):
"""LAMMPS stands for Large-scale Atomic/Molecular Massively
Parallel Simulator. This package uses patch releases, not
    stable releases.
See https://github.com/spack/spack/pull/5342 for a detailed
discussion.
"""
homepage = "http://lammps.sandia.gov/"
url = "https://github.com/lammps/lammps/archive/patch_1Sep2017.tar.gz"
version('20180316', '25bad35679583e0dd8cb8753665bb84b')
version('20180222', '4d0513e3183bd57721814d217fdaf957')
version('20170922', '4306071f919ec7e759bda195c26cfd9a')
version('20170901', '767e7f07289663f033474dfe974974e7')
version('develop', git='https://github.com/lammps/lammps', branch='master')
def url_for_version(self, version):
vdate = dt.datetime.strptime(str(version), "%Y%m%d")
return "https://github.com/lammps/lammps/archive/patch_{0}.tar.gz".format(
vdate.strftime("%d%b%Y").lstrip('0'))
supported_packages = ['asphere', 'body', 'class2', 'colloid', 'compress',
'coreshell', 'dipole', 'granular', 'kspace', 'latte',
'manybody', 'mc', 'meam', 'misc', 'molecule',
'mpiio', 'peri', 'poems', 'python', 'qeq', 'reax',
'replica', 'rigid', 'shock', 'snap', 'srd',
'user-atc', 'user-h5md', 'user-lb', 'user-misc',
'user-netcdf', 'user-omp', 'voronoi']
for pkg in supported_packages:
variant(pkg, default=False,
description='Activate the {0} package'.format(pkg))
variant('lib', default=True,
description='Build the liblammps in addition to the executable')
variant('mpi', default=True,
description='Build with mpi')
depends_on('mpi', when='+mpi')
depends_on('mpi', when='+mpiio')
depends_on('fftw', when='+kspace')
depends_on('voropp', when='+voronoi')
depends_on('netcdf+mpi', when='+user-netcdf')
depends_on('blas', when='+user-atc')
depends_on('lapack', when='+user-atc')
depends_on('latte@1.0.1', when='@:20180222+latte')
depends_on('latte@1.1.1:', when='@20180316:+latte')
depends_on('blas', when='+latte')
depends_on('lapack', when='+latte')
depends_on('python', when='+python')
depends_on('mpi', when='+user-lb')
depends_on('mpi', when='+user-h5md')
depends_on('hdf5', when='+user-h5md')
conflicts('+body', when='+poems')
conflicts('+latte', when='@:20170921')
conflicts('+python', when='~lib')
conflicts('+qeq', when='~manybody')
conflicts('+user-atc', when='~manybody')
conflicts('+user-misc', when='~manybody')
conflicts('+user-phonon', when='~kspace')
patch("lib.patch", when="@20170901")
patch("660.patch", when="@20170922")
root_cmakelists_dir = 'cmake'
def cmake_args(self):
spec = self.spec
args = [
'-DBUILD_SHARED_LIBS={0}'.format(
'ON' if '+lib' in spec else 'OFF'),
'-DENABLE_MPI={0}'.format(
'ON' if '+mpi' in spec else 'OFF')
]
for pkg in self.supported_packages:
opt = '-DENABLE_{0}'.format(pkg.upper())
if '+{0}'.format(pkg) in spec:
args.append('{0}=ON'.format(opt))
else:
args.append('{0}=OFF'.format(opt))
if '+kspace' in spec:
args.append('-DFFT=FFTW3')
return args
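    # Hedged illustration (not part of the package): for a hypothetical spec
    # such as `lammps +lib +mpi +kspace`, cmake_args() would return
    # ['-DBUILD_SHARED_LIBS=ON', '-DENABLE_MPI=ON', ..., '-DENABLE_KSPACE=ON',
    #  ..., '-DFFT=FFTW3'] with every other package flag set to OFF.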
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/lammps/package.py
|
Python
|
lgpl-2.1
| 4,781
|
[
"LAMMPS",
"NetCDF"
] |
e26feeb047348541fd2fe5efa235c7a32227c30879ee4eee2a64275d644168d9
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for Epiphany."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import orca.scripts.toolkits.WebKitGtk as WebKitGtk
from orca.structural_navigation import StructuralNavigation
########################################################################
# #
# The Epiphany script class. #
# #
########################################################################
class Script(WebKitGtk.Script):
def __init__(self, app):
"""Creates a new script for the given application."""
WebKitGtk.Script.__init__(self, app, isBrowser=True)
def getEnabledStructuralNavigationTypes(self):
"""Returns a list of the structural navigation object types
enabled in this script."""
enabledTypes = [StructuralNavigation.ANCHOR,
StructuralNavigation.BLOCKQUOTE,
StructuralNavigation.BUTTON,
StructuralNavigation.CHECK_BOX,
StructuralNavigation.CHUNK,
StructuralNavigation.COMBO_BOX,
StructuralNavigation.ENTRY,
StructuralNavigation.FORM_FIELD,
StructuralNavigation.HEADING,
StructuralNavigation.LANDMARK,
StructuralNavigation.LIST,
StructuralNavigation.LIST_ITEM,
StructuralNavigation.LIVE_REGION,
StructuralNavigation.PARAGRAPH,
StructuralNavigation.RADIO_BUTTON,
StructuralNavigation.SEPARATOR,
StructuralNavigation.TABLE,
StructuralNavigation.TABLE_CELL,
StructuralNavigation.UNVISITED_LINK,
StructuralNavigation.VISITED_LINK]
return enabledTypes
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/epiphany/script.py
|
Python
|
gpl-3.0
| 2,923
|
[
"ORCA"
] |
42be9e41b9f631e9928ece1157f98c13b9fa34c7b0a412a4ac72aa3f05bb88c8
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for spectroscopy specific tools (spectrum fitting etc).
"""
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from six.moves import zip
logger = logging.getLogger(__name__)
from scipy.integrate import simps
from .fitting import fit_quad_to_peak
def align_and_scale(energy_list, counts_list, pk_find_fun=None):
"""
Parameters
----------
energy_list : iterable of ndarrays
list of ndarrays with the energy of each element
counts_list : iterable of ndarrays
list of ndarrays of counts/element
pk_find_fun : function or None
A function which takes two ndarrays and returns parameters
about the largest peak. If None, defaults to `find_largest_peak`.
        For this demo, the output is (center, height, width), but this should
be pinned down better.
Returns
-------
out_e : list of ndarray
The aligned/scaled energy arrays
out_c : list of ndarray
The count arrays (should be the same as the input)
"""
if pk_find_fun is None:
pk_find_fun = find_largest_peak
base_sigma = None
out_e, out_c = [], []
for e, c in zip(energy_list, counts_list):
E0, max_val, sigma = pk_find_fun(e, c)
if base_sigma is None:
base_sigma = sigma
out_e.append((e - E0) * base_sigma / sigma)
out_c.append(c)
return out_e, out_c
def find_largest_peak(x, y, window=None):
"""
Finds and estimates the location, width, and height of
the largest peak. Assumes the top of the peak can be
approximated as a Gaussian. Finds the peak properties
using least-squares fitting of a parabola to the log of
the counts.
The region around the peak can be approximated by
Y = Y0 * exp(- (X - X0)**2 / (2 * sigma **2))
Parameters
----------
x : ndarray
The independent variable
    y : ndarray
Dependent variable sampled at positions X
window : int, optional
The size of the window around the maximum to use
for the fitting
Returns
-------
x0 : float
The location of the peak
y0 : float
The magnitude of the peak
sigma : float
Width of the peak
"""
# make sure they are _really_ arrays
x = np.asarray(x)
y = np.asarray(y)
# get the bin with the largest number of counts
j = np.argmax(y)
if window is not None:
        roi = slice(max(j - window, 0), j + window + 1)  # clamp the left edge at 0
else:
roi = slice(0, -1)
(w, x0, y0), r2 = fit_quad_to_peak(x[roi], np.log(y[roi]))
return x0, np.exp(y0), 1/np.sqrt(-2*w)
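# Hedged derivation note (not in the original module): near the peak,
# log(Y) = log(Y0) - (x - x0)**2 / (2 * sigma**2), so fitting the quadratic
# w*(x - x0)**2 + y0 to log(y) gives w = -1 / (2 * sigma**2); hence
# sigma = 1 / sqrt(-2*w) and Y0 = exp(y0), exactly what is returned above.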
def integrate_ROI_spectrum(bin_edges, counts, x_min, x_max):
"""Integrate region(s) of histogram.
If `x_min` and `x_max` are arrays/lists they must be equal in
    length. The values contained in `bin_edges` must be
    monotonic (up or down). The returned value is a single scalar: the
    sum over all the regions. Each region is computed independently;
    if regions overlap, the overlapped area will be included multiple
    times in the final sum.
`bin_edges` is an array of the left edges and the final right
edges of the bins. `counts` is the value in each of those bins.
    The bins whose centers fall within the integration limits are
included in the sum.
Parameters
----------
bin_edges : array
Independent variable, any unit.
Must be one longer in length than counts
counts : array
Dependent variable, any units
x_min : float or array
The lower edge of the integration region(s).
x_max : float or array
The upper edge of the integration region(s).
Returns
-------
float
The totals integrated value in same units as `counts`
"""
    bin_edges = np.asarray(bin_edges)
    # bin centers are the left edges plus half of each bin width
    return integrate_ROI(bin_edges[:-1] + np.diff(bin_edges) / 2,
                         counts, x_min, x_max)
def _formatter_array_regions(x, centers, window=1, tab_count=0):
"""Returns a formatted string of sub-sections of an array
Each value in center generates a section of the string like:
{tab_count*\t}c : [x[c - n] ... x[c] ... x[c + n + 1]]
Parameters
----------
x : array
The array to be looked into
centers : iterable
The locations to print out around
window : int, optional
how many values on either side of center to include
defaults to 1
tab_count : int, optional
The number of tabs to pre-fix lines with
default is 0
Returns
-------
str
The formatted string
"""
xl = len(x)
x = np.asarray(x)
header = ("\t"*tab_count + 'center\tarray values\n' +
"\t"*tab_count + '------\t------------\n')
return header + '\n'.join(["\t"*tab_count +
"{c}: \t {vals}".format(c=c,
vals=x[np.max([0, c-window]):
np.min([xl, c + window + 1])])
for c in centers])
def integrate_ROI(x, y, x_min, x_max):
"""Integrate region(s) of input data.
If `x_min` and `x_max` are arrays/lists they must be equal in
    length. The values contained in `x` must be monotonic (up or
    down). The returned value is a single scalar: the sum over all
    the regions. Each region is computed independently; if regions
    overlap, the overlapped area will be included multiple times in
    the final sum.
This function assumes that `y` is a function of
`x` sampled at `x`.
Parameters
----------
x : array
Independent variable, any unit
y : array
Dependent variable, any units
x_min : float or array
The lower edge of the integration region(s)
in units of x.
x_max : float or array
The upper edge of the integration region(s)
in units of x.
Returns
-------
float
The totals integrated value in same units as `y`
"""
# make sure x (x-values) and y (y-values) are arrays
x = np.asarray(x)
y = np.asarray(y)
if x.shape != y.shape:
raise ValueError("Inputs (x and y) must be the same "
"size. x.shape = {0} and y.shape = "
"{1}".format(x.shape, y.shape))
    # np.sign(np.diff(x)) gives the sign of each step in x; the
    # monotonicity checks below operate on this sign array.
    eval_x_arr_sign = np.sign(np.diff(x))
    # verify that x is monotonic; if it is not, the error points to the
    # locations within the source array where violations occur.
    if not np.all(eval_x_arr_sign == eval_x_arr_sign[0]):
        error_locations = np.where(eval_x_arr_sign != eval_x_arr_sign[0])[0]
        raise ValueError("Independent variable must be monotonic. "
                         "Erroneous values found at x-value array "
                         "index locations:\n" +
                         _formatter_array_regions(x, error_locations))
    # if x is monotonically decreasing, reverse both x and y so that x
    # increases monotonically before integrating
if eval_x_arr_sign[0] == -1:
x = x[::-1]
y = y[::-1]
logging.debug("Input values for 'x' were found to be "
"monotonically decreasing. The 'x' and "
"'y' arrays have been reversed prior to "
"integration.")
# up-cast to 1d and make sure it is flat
x_min = np.atleast_1d(x_min).ravel()
x_max = np.atleast_1d(x_max).ravel()
# verify that the number of minimum and maximum boundary values are equal
if len(x_min) != len(x_max):
raise ValueError("integration bounds must have same lengths")
# verify that the specified minimum values are actually less than the
# sister maximum value, and raise error if any minimum value is actually
# greater than the sister maximum value.
if np.any(x_min >= x_max):
raise ValueError("All lower integration bounds must be less than "
"upper integration bounds.")
# check to make sure that all specified minimum and maximum values are
# actually contained within the extents of the independent variable array
if np.any(x_min < x[0]):
error_locations = np.where(x_min < x[0])[0]
raise ValueError("Specified lower integration boundary values are "
"outside the spectrum range. All minimum integration "
"boundaries must be greater than, or equal to the "
"lowest value in spectrum range. The erroneous x_min_"
"array indices are:\n" +
_formatter_array_regions(x_min,
error_locations, window=0))
if np.any(x_max > x[-1]):
error_locations = np.where(x_max > x[-1])[0]
raise ValueError("Specified upper integration boundary values "
"are outside the spectrum range. All maximum "
"integration boundary values must be less "
"than, or equal to the highest value in the spectrum "
"range. The erroneous x_max array indices are: "
"\n" +
_formatter_array_regions(x_max,
error_locations, window=0))
# find the bottom index of each integration bound
bottom_indx = x.searchsorted(x_min)
# find the top index of each integration bound
# NOTE: +1 required for correct slicing for integration function
top_indx = x.searchsorted(x_max) + 1
# set up temporary variables
accum = 0
# integrate each region
for bot, top in zip(bottom_indx, top_indx):
# Note: If an odd number of intervals is specified, then the
# even='avg' setting calculates and averages first AND last
# N-2 intervals using trapezoidal rule.
# If calculation speed become an issue, then consider changing
# setting to 'first', or 'last' in which case trap rule is only
# applied to either first or last N-2 intervals.
accum += simps(y[bot:top], x[bot:top], even='avg')
return accum
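# Usage sketch (illustrative only): integrate y = x over two regions.
#
#     import numpy as np
#     x = np.linspace(0, 10, 101)
#     y = x.copy()
#     total = integrate_ROI(x, y, x_min=[1, 6], x_max=[2, 7])
#
# Simpson's rule is exact for polynomials here, so `total` is
# (2**2 - 1**2) / 2 + (7**2 - 6**2) / 2 = 8.0.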
|
hainm/scikit-xray
|
skxray/core/spectroscopy.py
|
Python
|
bsd-3-clause
| 13,125
|
[
"Gaussian"
] |
10e378c08bd02e0f01acf15bd334049c433aeb8fb3cc7e94bda0c29ca5437be7
|
#! encoding: utf-8
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module contains convenience methods to generate ROI labeled arrays for
simple shapes such as rectangles and concentric circles.
"""
from __future__ import absolute_import, division, print_function
import collections
import scipy.ndimage.measurements as ndim
from skimage.draw import line
from skimage import img_as_float, feature, color, draw
from skimage.measure import ransac, CircleModel
import numpy as np
from . import utils
import logging
logger = logging.getLogger(__name__)
def rectangles(coords, shape):
"""
    This function will provide the label array for rectangular regions
    of interest.
Parameters
----------
coords : iterable
coordinates of the upper-left corner and width and height of each
rectangle: e.g., [(x, y, w, h), (x, y, w, h)]
shape : tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates. Order is (rr, cc).
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are specified
in coords. Order is (rr, cc).
"""
labels_grid = np.zeros(shape, dtype=np.int64)
for i, (col_coor, row_coor, col_val, row_val) in enumerate(coords):
left, right = np.max([col_coor, 0]), np.min([col_coor + col_val,
shape[0]])
top, bottom = np.max([row_coor, 0]), np.min([row_coor + row_val,
shape[1]])
slc1 = slice(left, right)
slc2 = slice(top, bottom)
if np.any(labels_grid[slc1, slc2]):
raise ValueError("overlapping ROIs")
# assign a different scalar for each roi
labels_grid[slc1, slc2] = (i + 1)
return labels_grid
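# Usage sketch (illustrative only): two non-overlapping rectangular ROIs
# in a 10x10 image; their pixels get labels 1 and 2, background stays 0.
#
#     labels = rectangles([(0, 0, 3, 3), (5, 5, 4, 4)], shape=(10, 10))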
def rings(edges, center, shape):
"""
    Draw annular (ring-shaped) regions of interest.
Each ring will be labeled with an integer. Regions outside any ring will
be filled with zeros.
Parameters
----------
edges: list
giving the inner and outer radius of each ring
e.g., [(1, 2), (11, 12), (21, 22)]
center: tuple
point in image where r=0; may be a float giving subpixel precision.
Order is (rr, cc).
shape: tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates. Order is (rr, cc).
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are specified
in edges.
"""
edges = np.atleast_2d(np.asarray(edges)).ravel()
if not 0 == len(edges) % 2:
raise ValueError("edges should have an even number of elements, "
"giving inner, outer radii for each ring")
if not np.all(np.diff(edges) >= 0):
raise ValueError("edges are expected to be monotonically increasing, "
"giving inner and outer radii of each ring from "
"r=0 outward")
r_coord = utils.radial_grid(center, shape).ravel()
return _make_roi(r_coord, edges, shape)
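# Usage sketch (illustrative only): three concentric rings centered at
# (32, 32) in a 64x64 image, labeled 1, 2, 3 from the center outward.
#
#     labels = rings([(5, 10), (15, 20), (25, 30)], (32, 32), (64, 64))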
def ring_edges(inner_radius, width, spacing=0, num_rings=None):
""" Calculate the inner and outer radius of a set of rings.
The number of rings, their widths, and any spacing between rings can be
specified. They can be uniform or varied.
Parameters
----------
inner_radius : float
inner radius of the inner-most ring
width : float or list of floats
ring thickness
If a float, all rings will have the same thickness.
spacing : float or list of floats, optional
margin between rings, 0 by default
If a float, all rings will have the same spacing. If a list,
the length of the list must be one less than the number of
rings.
num_rings : int, optional
number of rings
Required if width and spacing are not lists and number
cannot thereby be inferred. If it is given and can also be
inferred, input is checked for consistency.
Returns
-------
edges : array
inner and outer radius for each ring
Example
-------
# Make two rings starting at r=1px, each 5px wide
>>> ring_edges(inner_radius=1, width=5, num_rings=2)
[(1, 6), (6, 11)]
# Make three rings of different widths and spacings.
# Since the width and spacings are given individually, the number of
# rings here is simply inferred.
>>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2))
[(1, 6), (7, 11), (13, 16)]
"""
# All of this input validation merely checks that width, spacing, and
# num_rings are self-consistent and complete.
width_is_list = isinstance(width, collections.abc.Iterable)
spacing_is_list = isinstance(spacing, collections.abc.Iterable)
if (width_is_list and spacing_is_list):
        if len(width) != len(spacing) + 1:
            raise ValueError("List of spacings must be one shorter than "
                             "the list of widths.")
if num_rings is None:
try:
num_rings = len(width)
except TypeError:
try:
num_rings = len(spacing) + 1
except TypeError:
raise ValueError("Since width and spacing are constant, "
"num_rings cannot be inferred and must be "
"specified.")
else:
if width_is_list:
if num_rings != len(width):
raise ValueError("num_rings does not match width list")
if spacing_is_list:
if num_rings-1 != len(spacing):
raise ValueError("num_rings does not match spacing list")
    # Now regularize the input.
if not width_is_list:
width = np.ones(num_rings) * width
if not spacing_is_list:
spacing = np.ones(num_rings - 1) * spacing
# The inner radius is the first "spacing."
all_spacings = np.insert(spacing, 0, inner_radius)
steps = np.array([all_spacings, width]).T.ravel()
edges = np.cumsum(steps).reshape(-1, 2)
return edges
def segmented_rings(edges, segments, center, shape, offset_angle=0):
"""
Parameters
----------
edges : array
inner and outer radius for each ring
segments : int or list
number of pie slices or list of angles in radians
That is, 8 produces eight equal-sized angular segments,
whereas a list can be used to produce segments of unequal size.
center : tuple
point in image where r=0; may be a float giving subpixel precision.
Order is (rr, cc).
    shape : tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates. Order is (rr, cc).
    offset_angle : float or array, optional
        angular offset in radians from the positive X axis
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are specified
in edges and segments
See Also
--------
ring_edges : Calculate the inner and outer radius of a set of rings.
"""
edges = np.asarray(edges).ravel()
if not 0 == len(edges) % 2:
raise ValueError("edges should have an even number of elements, "
"giving inner, outer radii for each ring")
if not np.all(np.diff(edges) >= 0):
raise ValueError("edges are expected to be monotonically increasing, "
"giving inner and outer radii of each ring from "
"r=0 outward")
agrid = utils.angle_grid(center, shape)
agrid[agrid < 0] = 2*np.pi + agrid[agrid < 0]
segments_is_list = isinstance(segments, collections.abc.Iterable)
if segments_is_list:
segments = np.asarray(segments) + offset_angle
else:
# N equal segments requires N+1 bin edges spanning 0 to 2pi.
segments = np.linspace(0, 2*np.pi, num=1+segments, endpoint=True)
segments += offset_angle
# the indices of the bins(angles) to which each value in input
# array(angle_grid) belongs.
ind_grid = (np.digitize(np.ravel(agrid), segments,
right=False)).reshape(shape)
label_array = np.zeros(shape, dtype=np.int64)
# radius grid for the image_shape
rgrid = utils.radial_grid(center, shape)
# assign indices value according to angles then rings
len_segments = len(segments)
for i in range(len(edges) // 2):
indices = (edges[2*i] <= rgrid) & (rgrid < edges[2*i + 1])
# Combine "segment #" and "ring #" to get unique label for each.
label_array[indices] = ind_grid[indices] + (len_segments - 1) * i
return label_array
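# Usage sketch (illustrative only): two rings, each split into four
# equal angular segments, giving labels 1..8.
#
#     edges = ring_edges(inner_radius=5, width=5, num_rings=2)
#     labels = segmented_rings(edges, 4, (32, 32), (64, 64))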
def roi_max_counts(images_sets, label_array):
"""
Return the brightest pixel in any ROI in any image in the image set.
Parameters
----------
    images_sets : iterable
        iterable of image stacks, each with shape
        (num_images, num_rows, num_cols)
label_array : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
Returns
-------
max_counts : int
maximum pixel counts
"""
max_cts = 0
for img_set in images_sets:
for img in img_set:
max_cts = max(max_cts, ndim.maximum(img, label_array))
return max_cts
def roi_pixel_values(image, labels, index=None):
"""
This will provide intensities of the ROI's of the labeled array
according to the pixel list
eg: intensities of the rings of the labeled array
Parameters
----------
image : array
image data dimensions are: (rr, cc)
labels : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
index_list : list, optional
labels list
eg: 5 ROI's
index = [1, 2, 3, 4, 5]
Returns
-------
roi_pix : list
intensities of the ROI's of the labeled array according
to the pixel list
"""
if labels.shape != image.shape:
raise ValueError("Shape of the image data should be equal to"
" shape of the labeled array")
if index is None:
index = np.arange(1, np.max(labels) + 1)
roi_pix = []
for n in index:
roi_pix.append(image[labels == n])
return roi_pix, index
def mean_intensity(images, labeled_array, index=None):
"""Compute the mean intensity for each ROI in the image list
Parameters
----------
images : list
List of images
labeled_array : array
labeled array; 0 is background.
Each ROI is represented by a nonzero integer. It is not required that
the ROI labels are contiguous
index : int, list, optional
The ROI's to use. If None, this function will extract averages for all
ROIs
Returns
-------
mean_intensity : array
The mean intensity of each ROI for all `images`
Dimensions:
            - len(mean_intensity) == len(images)
            - len(mean_intensity[0]) == len(index)
index : list
The labels for each element of the `mean_intensity` list
"""
    if labeled_array.shape != images[0].shape:
raise ValueError(
"`images` shape (%s) needs to be equal to the labeled_array shape"
"(%s)" % (images[0].shape, labeled_array.shape))
# handle various input for `index`
if index is None:
index = list(np.unique(labeled_array))
index.remove(0)
try:
len(index)
except TypeError:
index = [index]
# pre-allocate an array for performance
# might be able to use list comprehension to make this faster
    mean_intensity = np.zeros((len(images), len(index)))
for n, img in enumerate(images):
# use a mean that is mask-aware
mean_intensity[n] = ndim.mean(img, labeled_array, index=index)
return mean_intensity, index
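# Usage sketch (illustrative only): per-ROI mean intensities for a small
# random stack; `means` has shape (num_images, num_rois).
#
#     import numpy as np
#     imgs = np.random.random((5, 64, 64))
#     labels = rings([(5, 10), (15, 20)], (32, 32), (64, 64))
#     means, index = mean_intensity(imgs, labels)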
def circular_average(image, calibrated_center, threshold=0, nx=100,
pixel_size=(1, 1), min_x=None, max_x=None, mask=None):
"""Circular average of the the image data
The circular average is also known as the radial integration
Parameters
----------
image : array
Image to compute the average as a function of radius
calibrated_center : tuple
The center of the image in pixel units
argument order should be (row, col)
threshold : int, optional
Ignore counts below `threshold`
default is zero
nx : int, optional
number of bins in x
defaults is 100 bins
pixel_size : tuple, optional
The size of a pixel (in a real unit, like mm).
argument order should be (pixel_height, pixel_width)
default is (1, 1)
    min_x : float, optional
        Left edge of the first bin; defaults to the minimum value of x
    max_x : float, optional
        Right edge of the last bin; defaults to the maximum value of x
    mask : array, optional
        mask for the 2D data; 1 is unmasked and 0 is masked.
        None (default) means no mask.
Returns
-------
bin_centers : array
The center of each bin in R. shape is (nx, )
ring_averages : array
Radial average of the image. shape is (nx, ).
See Also
--------
bad_to_nan_gen : Create a mask with np.nan entries
bin_grid : Bin and integrate an image, given the radial array of pixels
Useful for nonlinear spacing (Ewald curvature)
"""
radial_val = utils.radial_grid(calibrated_center, image.shape, pixel_size)
if mask is not None:
w = np.where(mask == 1)
radial_val = radial_val[w]
image = image[w]
bin_edges, sums, counts = utils.bin_1D(np.ravel(radial_val),
np.ravel(image), nx,
min_x=min_x,
max_x=max_x)
th_mask = counts > threshold
ring_averages = sums[th_mask] / counts[th_mask]
bin_centers = utils.bin_edges_to_centers(bin_edges)[th_mask]
return bin_centers, ring_averages
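# Usage sketch (illustrative only): radial profile of a synthetic image.
#
#     import numpy as np
#     img = np.random.random((128, 128))
#     bin_centers, ring_avg = circular_average(img, (64, 64), nx=50)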
def kymograph(images, labels, num):
"""
    This function will provide data for a graphical representation of
    pixel variation over time for the required ROI.
Parameters
----------
images : array
Image stack. dimensions are: (num_img, num_rows, num_cols)
labels : array
labeled array; 0 is background. Each ROI is represented by an integer
num : int
The ROI to turn into a kymograph
Returns
-------
    kymograph : array
        data for a graphical representation of pixel variation over
        time for the required ROI
"""
    kymo = []
    for img in images:
        kymo.append(roi_pixel_values(img, labels == num)[0])
    return np.vstack(kymo)
def extract_label_indices(labels):
"""
    This will find the foreground pixels of the required regions of
    interest (ROIs) in the labeled array, returning the label of each
    foreground pixel and the corresponding pixel indices.
Parameters
----------
labels : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
Returns
-------
label_mask : array
1D array labeling each foreground pixel
e.g., [1, 1, 1, 1, 2, 2, 1, 1]
indices : array
1D array of indices into the raveled image for all
foreground pixels (labeled nonzero)
e.g., [5, 6, 7, 8, 14, 15, 21, 22]
"""
img_dim = labels.shape
# TODO Make this tighter.
w = np.where(np.ravel(labels) > 0)
grid = np.indices((img_dim[0], img_dim[1]))
pixel_list = np.ravel((grid[0] * img_dim[1] + grid[1]))[w]
# discard the zeros
label_mask = labels[labels > 0]
return label_mask, pixel_list
def _make_roi(coords, edges, shape):
""" Helper function to create ring rois and bar rois
Parameters
----------
coords : array
shape is image shape
edges : list
List of tuples of inner (left or top) and outer (right or bottom)
edges of each roi.
e.g., edges=[(1, 2), (11, 12), (21, 22)]
shape : tuple
Shape of the image in which to create the ROIs
e.g., shape=(512, 512)
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are
specified in `edges`.
Has shape=`image shape`
"""
label_array = np.digitize(coords, edges, right=False)
# Even elements of label_array are in the space between rings.
label_array = (np.where(label_array % 2 != 0, label_array, 0) + 1) // 2
return label_array.reshape(shape)
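# Worked example of the np.digitize trick above (illustrative only):
# with edges = [1, 2, 11, 12], np.digitize assigns bin 0 for r < 1,
# bin 1 for 1 <= r < 2, bin 2 for 2 <= r < 11, and so on. Odd bins lie
# inside a ring and even bins are the gaps, so
# (np.where(label_array % 2 != 0, label_array, 0) + 1) // 2 maps the
# ring bins 1, 3, 5, ... to labels 1, 2, 3, ... and everything else
# to 0.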
def bar(edges, shape, horizontal=True, values=None):
"""Draw bars defined by `edges` from one edge to the other of `image_shape`
Bars will be horizontal or vertical depending on the value of `horizontal`
Parameters
----------
edges : list
List of tuples of inner (left or top) and outer (right or bottom)
edges of each bar.
e.g., edges=[(1, 2), (11, 12), (21, 22)]
shape : tuple
Shape of the image in which to create the ROIs
e.g., shape=(512, 512)
horizontal : bool, optional
True: Make horizontal bars
False: Make vertical bars
Defaults to True
values : array, optional
image pixels co-ordinates
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are
specified in `edges`.
Has shape=`image shape`
Note
----
The primary use case is in GISAXS.
"""
edges = np.atleast_2d(np.asarray(edges)).ravel()
if not 0 == len(edges) % 2:
raise ValueError("edges should have an even number of elements, "
"giving inner, outer edge value for each bar")
if not np.all(np.diff(edges) >= 0):
raise ValueError("edges are expected to be monotonically increasing, "
"giving inner and outer radii of each bar from "
"r=0 outward")
    if values is None:
        values = np.repeat(range(shape[0]), shape[1])
        if not horizontal:
            values = np.tile(range(shape[1]), shape[0])
return _make_roi(values, edges, shape)
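# Usage sketch (illustrative only): two horizontal bars covering rows
# [1, 2) and [11, 12) of a 20x20 image, labeled 1 and 2.
#
#     labels = bar([(1, 2), (11, 12)], (20, 20))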
def box(shape, v_edges, h_edges=None, h_values=None, v_values=None):
"""Draw box shaped rois when the horizontal and vertical edges
are provided.
Parameters
----------
shape : tuple
Shape of the image in which to create the ROIs
e.g., shape=(512, 512)
v_edges : list
giving the inner and outer edges of each vertical bar
e.g., [(1, 2), (11, 12), (21, 22)]
h_edges : list, optional
giving the inner and outer edges of each horizontal bar
e.g., [(1, 2), (11, 12), (21, 22)]
h_values : array, optional
image pixels co-ordinates in horizontal direction
shape has to be image shape
v_values : array, optional
image pixels co-ordinates in vertical direction
shape has to be image shape
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are specified
in edges.
Note
----
    To draw boxes according to image pixel co-ordinates, both h_values
    and v_values have to be provided. The primary use case is in GISAXS.
    e.g., v_values=gisaxs_qy, h_values=gisaxs_qx
"""
if h_edges is None:
h_edges = v_edges
if h_values is None and v_values is None:
v_values, h_values = np.mgrid[:shape[0], :shape[1]]
elif h_values.shape != v_values.shape:
raise ValueError("Shape of the h_values array should be equal to"
" shape of the v_values array")
for edges in (h_edges, v_edges):
edges = np.atleast_2d(np.asarray(edges)).ravel()
if not 0 == len(edges) % 2:
raise ValueError("edges should have an even number of elements, "
"giving inner, outer edges for each roi")
coords = []
for h in h_edges:
for v in v_edges:
coords.append((h[0], v[0], h[1]-h[0], v[1] - v[0]))
return rectangles(coords, v_values.shape)
def lines(end_points, shape):
"""
Parameters
----------
end_points : iterable
coordinates of the starting point and the ending point of each
line: e.g., [(start_x, start_y, end_x, end_y), (x1, y1, x2, y2)]
shape : tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates. Order is (rr, cc).
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are specified
in coords. Order is (rr, cc).
"""
label_array = np.zeros(shape, dtype=np.int64)
label = 0
for points in end_points:
if len(points) != 4:
raise ValueError("end points should have four number of"
" elements, giving starting co-ordinates,"
" ending co-ordinates for each line")
rr, cc = line(np.max([points[0], 0]), np.max([points[1], 0]),
np.min([points[2], shape[0]-1]),
np.min([points[3], shape[1]-1]))
label += 1
label_array[rr, cc] = label
return label_array
def auto_find_center_rings(avg_img, sigma=1, no_rings=4, min_samples=3,
residual_threshold=1, max_trials=1000):
"""This will find the center of the speckle pattern and the radii of the
most intense rings.
Parameters
----------
avg_img : 2D array
        the averaged image data
sigma : float, optional
Standard deviation of the Gaussian filter.
no_rings : int, optional
number of rings
    min_samples : int, optional
The minimum number of data points to fit a model to.
residual_threshold : float, optional
Maximum distance for a data point to be classified as an inlier.
max_trials : int, optional
Maximum number of iterations for random sample selection.
Returns
-------
center : tuple
center co-ordinates of the speckle pattern
image : 2D array
Indices of pixels that belong to the rings,
directly index into an array
radii : list
values of the radii of the rings
Note
----
    The scikit-image ransac method
    (http://www.imagexd.org/tutorial/lessons/1_ransac.html) is used to
    automatically find the center and the most intense rings.
"""
image = img_as_float(color.rgb2gray(avg_img))
edges = feature.canny(image, sigma)
coords = np.column_stack(np.nonzero(edges))
edge_pts_xy = coords[:, ::-1]
radii = []
for i in range(no_rings):
model_robust, inliers = ransac(edge_pts_xy, CircleModel, min_samples,
residual_threshold,
max_trials=max_trials)
if i == 0:
center = int(model_robust.params[0]), int(model_robust.params[1])
radii.append(model_robust.params[2])
rr, cc = draw.circle_perimeter(center[1], center[0],
int(model_robust.params[2]),
shape=image.shape)
image[rr, cc] = i + 1
edge_pts_xy = edge_pts_xy[~inliers]
return center, image, radii
|
tacaswell/scikit-beam
|
skbeam/core/roi.py
|
Python
|
bsd-3-clause
| 26,488
|
[
"Gaussian"
] |
4bc1c8f223a430db1c28772256f869ebb2ee1cfbff275ad02132cf881a3b9038
|
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
from __future__ import print_function
import unittest
import sys, os.path
pkg_dir = os.path.dirname(os.path.realpath(__file__)) + '/../'
sys.path.append(pkg_dir)
from CRPGaussianSamplers import *
class TestCRPGaussianCollapsedGibbs(unittest.TestCase):
def setUp(self):
N_ITER = 1000
self.crp_sampler = CRPGaussianCollapsedGibbs(cl_mode = True)
self.crp_sampler.set_sampling_params(niter = N_ITER)
def test_cl_1d(self):
print('Testing the OpenCL Gaussian Collapsed Gibbs sampler with 1-dimensional Gaussian data...')
obs = np.hstack((np.random.normal(1, 1, 81), np.random.normal(20, 1, 81), np.random.normal(10,1,300)))
self.crp_sampler.direct_read_obs(obs)
gpu_time, total_time, common_clusters = self.crp_sampler.do_inference()
print('Finished %d iterations\nOpenCL device time %f seconds; Total time %f seconds' % (self.crp_sampler.niter, gpu_time, total_time), file=sys.stderr)
self.assertTrue(gpu_time < total_time and len(common_clusters) > 0)
def test_cl_2d(self):
        print('Testing the OpenCL Gaussian Collapsed Gibbs sampler with 2-dimensional Gaussian data...')
obs = np.vstack((np.random.multivariate_normal(mean = [1., 1.], cov = [[.1, 0], [0, .1]], size = 12),
np.random.multivariate_normal(mean = [55., 52.], cov = [[0.1, 0], [0, .1]], size = 12)))
self.crp_sampler.direct_read_obs(obs)
gpu_time, total_time, common_clusters = self.crp_sampler.do_inference()
print('Finished %d iterations\nOpenCL device time %f seconds; Total time %f seconds' % (self.crp_sampler.niter, gpu_time, total_time), file=sys.stderr)
self.assertTrue(gpu_time < total_time and len(common_clusters) > 0)
if __name__ == '__main__':
unittest.main()
|
AusterweilLab/MPBNP
|
unittests/CRPGaussianCollapsedGibbsTest.py
|
Python
|
mit
| 1,859
|
[
"Gaussian"
] |
76d9c047b1f70d7eb6e1b750807f5d0afc25921c1e0b7b034857dafb1c06f882
|
"""
Add a state column to the history_dataset_association and library_dataset_dataset_association table.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.exc import *
from migrate import *
from migrate.changeset import *
from galaxy.model.custom_types import *
import datetime
now = datetime.datetime.utcnow
import sys, logging
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
DATASET_INSTANCE_TABLE_NAMES = [ 'history_dataset_association', 'library_dataset_dataset_association' ]
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
dataset_instance_tables = []
for table_name in DATASET_INSTANCE_TABLE_NAMES:
try:
dataset_instance_tables.append( ( table_name, Table( table_name, metadata, autoload=True ) ) )
except NoSuchTableError:
log.debug( "Failed loading table %s" % table_name )
if dataset_instance_tables:
for table_name, dataset_instance_table in dataset_instance_tables:
index_name = "ix_%s_state" % table_name
try:
col = Column( "state", TrimmedString( 64 ), index=True, nullable=True )
col.create( dataset_instance_table, index_name = index_name)
assert col is dataset_instance_table.c.state
except Exception, e:
log.debug( "Adding column 'state' to %s table failed: %s" % ( table_name, str( e ) ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
dataset_instance_tables = []
for table_name in DATASET_INSTANCE_TABLE_NAMES:
try:
dataset_instance_tables.append( ( table_name, Table( table_name, metadata, autoload=True ) ) )
except NoSuchTableError:
log.debug( "Failed loading table %s" % table_name )
if dataset_instance_tables:
for table_name, dataset_instance_table in dataset_instance_tables:
try:
col = dataset_instance_table.c.state
col.drop()
except Exception, e:
log.debug( "Dropping column 'state' from %s table failed: %s" % ( table_name, str( e ) ) )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0048_dataset_instance_state_column.py
|
Python
|
gpl-3.0
| 2,434
|
[
"Galaxy"
] |
fa954332e4c6d60d50ce6c93b8eedf2fee3e313e67634652a6ddf468dc70a47f
|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
# This example illustrates loading a model from an SWC file, inserting
# spines, and viewing it.
import moogli
import moose
from PyQt4 import Qt, QtCore, QtGui
import sys
import os
sys.path.append( '../util' )
PI = 3.14159265358979
frameRunTime = 0.0002
runtime = 1.0
inject = 5e-10
simdt = 5e-5
RM = 1.0
RA = 1.0
CM = 0.01
spineSpacing = 2.0e-6
minSpacing = 0.2e-6
spineSize = 1.0
spineSizeDistrib = 0.5
spineAngle = 0
spineAngleDistrib = 2*PI
filename = 'barrionuevo_cell1zr.CNG.swc'
#filename = 'h10.CNG.swc'
################################################################
# Utility function for building a compartment, used for spines.
def buildCompt( pa, name, length, dia, xoffset, RM, RA, CM ):
compt = moose.Compartment( pa.path + '/' + name )
compt.x0 = xoffset
compt.y0 = 0
compt.z0 = 0
compt.x = length + xoffset
compt.y = 0
compt.z = 0
compt.diameter = dia
compt.length = length
xa = dia * dia * PI / 4.0
sa = length * dia * PI
compt.Ra = length * RA / xa
compt.Rm = RM / sa
compt.Cm = CM * sa
return compt
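# For reference, the passive-cable bookkeeping above follows from the
# compartment geometry: with cross-section xa = PI * dia**2 / 4 and
# membrane area sa = PI * dia * length,
#     Ra = RA * length / xa    # axial resistance
#     Rm = RM / sa             # membrane resistance
#     Cm = CM * sa             # membrane capacitance
# where RA, RM, CM are the specific values defined at the top of this
# script.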
def makeSpineProto():
spine = moose.Neutral( '/library/spine' )
shaft = buildCompt( spine, 'shaft', 1e-6, 0.2e-6, 0, RM, RA, CM )
head = buildCompt( spine, 'head', 0.5e-6, 0.5e-6, 1e-6, RM, RA, CM )
moose.connect( shaft, 'axial', head, 'raxial' )
def main():
"""
This snippet illustrates how the Neuron class does the spine
specification, without the rdesigneur intermediate.
"""
app = QtGui.QApplication(sys.argv)
moose.Neutral( '/library' )
makeSpineProto()
model = moose.loadModel( filename, '/model' )
model[0].buildSegmentTree()
model[0].spineDistribution = [ \
'spine', '#apical#', \
'spacing', str( spineSpacing ), \
'spacingDistrib', str( minSpacing ), \
'angle', str( spineAngle ), \
'angleDistrib', str( spineAngleDistrib ), \
'size', str( spineSize ), \
'sizeDistrib', str( spineSizeDistrib ), \
'' \
]
moose.reinit()
# Now we set up the display
compts = moose.wildcardFind( "/model/#[ISA=CompartmentBase]" )
compts[0].inject = inject
ecomptPath = map( lambda x : x.path, compts )
morphology = moogli.read_morphology_from_moose(name = "", path = "/model")
morphology.create_group( "group_all", ecomptPath, -0.08, 0.02, \
[0.0, 0.5, 1.0, 1.0], [1.0, 0.0, 0.0, 0.9] )
viewer = moogli.DynamicMorphologyViewerWidget(morphology)
def callback( morphology, viewer ):
moose.start( frameRunTime )
Vm = map( lambda x: moose.element( x ).Vm, compts )
morphology.set_color( "group_all", Vm )
currTime = moose.element( '/clock' ).currentTime
#print currTime, compts[0].Vm
if ( currTime < runtime ):
return True
return False
viewer.set_callback( callback, idletime = 0 )
viewer.showMaximized()
viewer.show()
app.exec_()
if __name__ == '__main__':
main()
|
dilawar/moose-full
|
moose-examples/snippets/insertSpinesWithoutRdesigneur.py
|
Python
|
gpl-2.0
| 3,531
|
[
"MOOSE",
"NEURON"
] |
68f7fea979614c1e900ab3feaa0bf3d37391601da43d2a9dd07d5b0244fa4ef3
|
#!/usr/bin/python
import click
import os
from sidr import default
from sidr import runfile
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def validate_taxdump(value, method):
if (os.path.isfile("%s/%s" % (value, "names.dmp")) and
os.path.isfile("%s/%s" % (value, "nodes.dmp")) and
os.path.isfile("%s/%s" % (value, "merged.dmp")) and
os.path.isfile("%s/%s" % (value, "delnodes.dmp"))):
return value
else:
with click.Context(method) as ctx:
click.echo(ctx.get_help())
raise click.BadParameter("Could not find names.dmp in taxdump, specify a value or make sure the files are present")
@click.group()
def cli():
"""
Analyzes genomic data and attempts to classify contigs using a machine learning framework.
    SIDR uses data from BLAST (or similar classifiers) to train a Decision Tree model to classify sequence data as either belonging to a target organism, or belonging to something else. This classification can be used to filter the data for later assembly.
    To use SIDR, you will need to construct a preliminary assembly, align your reads back to that assembly, and then use BLAST to classify the assembly contigs.
"""
pass
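# Usage sketch (illustrative; the file names and target below are
# placeholders, not part of this repository):
#
#     sidr default -b aligned.bam -f assembly.fasta -r blast_hits.txt \
#         -d /path/to/taxdump -t Nematoda -k kept_contigs.txt
#
# This trains a model on the BLAST classifications and writes contigs
# classified as the target phylum to kept_contigs.txt.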
@cli.command(name="default", context_settings=CONTEXT_SETTINGS)
@click.option('--bam', '-b', type=click.Path(exists=True), help="Alignment of reads to preliminary assembly, in BAM format.")
@click.option('--fasta', '-f', type=click.Path(exists=True), help="Preliminary assembly, in FASTA format.")
@click.option('--blastresults', '-r', type=click.Path(exists=True), help="Classification of preliminary assembly from BLAST (or similar tools).")
@click.option('--taxdump', '-d', type=click.Path(), default=os.environ.get('BLASTDB'), help="Location of the NCBI Taxonomy dump. Default is $BLASTDB.")
#@click.option('--model', '-m', 'modelOutput', type=click.Path(), default="", help="Location to save a graphical representation of the trained decision tree (optional). Output is in the form of a DOT file.")
@click.option('--output', '-o', type=click.Path(), default="%s/classifications.txt" % os.getcwd())
@click.option('--tokeep', '-k', type=click.Path(), default="", help="Location to save the contigs identified as the target organism(optional).")
@click.option('--toremove', '-x', type=click.Path(), default="", help="Location to save the contigs identified as not belonging to the target organism (optional).")
@click.option('--binary', is_flag=True, help="Use binary target/nontarget classification.")
@click.option('--target', '-t', help="The identity of the target organism at the chosen classification level. It is recommended to use the organism's phylum.")
@click.option('--level', '-l', default="phylum", help="The classification level to use when constructing the model. Default is 'phylum'.")
# @click.option('--verbose', '-v', count=True, help="Output more debugging options, repeat to increase verbosity (unimplemented).")
def default_runner(bam, fasta, blastresults, taxdump, output, tokeep, toremove, binary, target, level):
"""
Runs the default analysis using raw preassembly data.
"""
modelOutput = False
validate_taxdump(taxdump, default_runner)
default.runAnalysis(bam, fasta, blastresults, taxdump, modelOutput, output, tokeep, toremove, binary, target, level)
@cli.command(name="runfile", context_settings=CONTEXT_SETTINGS)
@click.option('--infile', '-i', type=click.Path(exists=True), help="Comma-delimited input file.")
@click.option('--taxdump', '-d', type=click.Path(), default=os.environ.get('BLASTDB'), help="Location of the NCBI Taxonomy dump. Default is $BLASTDB.")
@click.option('--output', '-o', type=click.Path(), default="%s/classifications.txt" % os.getcwd())
#@click.option('--model', '-m', 'modelOutput', type=click.Path(), default="", help="Location to save a graphical representation of the trained decision tree (optional). Output is in the form of a DOT file.")
@click.option('--tokeep', '-k', type=click.Path(), default="", help="Location to save the contigs identified as the target organism(optional).")
@click.option('--toremove', '-x', type=click.Path(), default="", help="Location to save the contigs identified as not belonging to the target organism (optional).")
@click.option('--target', '-t', help="The identity of the target organism at the chosen classification level. It is recommended to use the organism's phylum.")
@click.option('--binary', is_flag=True, help="Use binary target/nontarget classification.")
@click.option('--level', '-l', default="phylum", help="The classification level to use when constructing the model. Default is 'phylum'.")
def runfile_runner(infile, taxdump, output, tokeep, toremove, binary, target, level):
"""
Runs a custom analysis using pre-computed data from BBMap or other sources.
Input data will be read for all variables which will be used to construct a Decision Tree model.
"""
modelOutput = False
validate_taxdump(taxdump, runfile_runner)
runfile.runAnalysis(taxdump, infile, level, modelOutput, output, tokeep, toremove, binary, target)
""" WIP
@cli.command(name="filter", context_settings=CONTEXT_SETTINGS)
@click.option('--tokeep', '-k', type=click.Path(), default="", help="File containing list of contigs from the alignment to keep.")
@click.option('--bam', '-b', type=click.Path(exists=True), help="Alignment of reads to preliminary assembly, in BAM format.")
@click.option('-i1', type=click.Path(exists=True), help="Right read fastq to extract reads from.")
@click.option('-i2', type=click.Path(exists=True), help="Left read fastq to extract reads from.")
@click.option('-o1', type=click.Path(), help="Right read fastq to extract reads to.")
@click.option('-o2', type=click.Path(), help="Left read fastq to extract reads to.")
def filter_runner(tokeep, bam, i1, i2, o1, o2):
"""
#Filters reads aligning to the given contigs.
"""
filterReads.runFilter(tokeep, bam, i1, i2, o1, o2)
if __name__ == "__main__": # TODO Setuptools
cli(prog_name="sidr")
"""
|
damurdock/SIDR
|
sidr/cli.py
|
Python
|
mit
| 6,054
|
[
"BLAST"
] |
dd62026aba57be1899a6aa67df1957ee31bc4ac95c46a9fdb506c9da7d13a2d5
|
import ocl
import pyocl
import camvtk
import time
import datetime
import vtk
def main(filename="frame/f.png"):
print ocl.revision()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(20, 12, 20)
myscreen.camera.SetFocalPoint(0,0, 0)
# axis arrows
camvtk.drawArrows(myscreen,center=(2,2,2))
# screenshot writer
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
c = ocl.CylCutter(1) # cutter
c.length = 3
print "cutter length=", c.length
p1 = ocl.CLPoint(-0.2,-0.2,0.2) # start of move
p2 = ocl.CLPoint(-0.2,0.2,0.0) # end of move
p3 = ocl.CLPoint(0.5,0.0,-0.5)
clpoints=[]
clpoints.append(p1)
clpoints.append(p2)
clpoints.append(p3)
f=ocl.Ocode()
f.set_depth(6) # depth and scale set here.
f.set_scale(1)
# cube
cube1 = ocl.CubeOCTVolume()
cube1.side=2.123
cube1.center = ocl.Point(0,0,0)
cube1.calcBB()
stock = ocl.LinOCT()
stock.init(3)
stock.build( cube1 )
# draw initial octree
tlist = pyocl.octree2trilist(stock)
surf = camvtk.STLSurf(triangleList=tlist)
myscreen.addActor(surf)
Nmoves = len(clpoints)
print Nmoves,"CL-points to process"
for n in xrange(0,Nmoves-1):
#if n<Nmoves-1:
print n," to ",n+1
startp = clpoints[n]
endp = clpoints[n+1]
sweep = ocl.LinOCT()
sweep.init(3)
g1vol = ocl.CylMoveOCTVolume(c, ocl.Point(startp.x,startp.y,startp.z), ocl.Point(endp.x,endp.y,endp.z))
camvtk.drawCylCutter(myscreen, c, startp)
camvtk.drawCylCutter(myscreen, c, endp)
myscreen.addActor( camvtk.Line( p1=(startp.x,startp.y,startp.z), p2=(endp.x,endp.y,endp.z), color=camvtk.red))
sweep.build( g1vol )
stock.diff(sweep)
myscreen.removeActor(surf)
tlist = pyocl.octree2trilist(stock)
surf = camvtk.STLSurf(triangleList=tlist)
surf.SetColor(camvtk.cyan)
surf.SetOpacity(1.0)
myscreen.addActor(surf)
myscreen.render()
time.sleep(0.2)
#exit()
# draw trees
#print "drawing trees"
#camvtk.drawTree2(myscreen, stock, opacity=1, color=camvtk.cyan)
# box around octree
oct_cube = camvtk.Cube(center=(0,0,0), length=4*f.get_scale(), color=camvtk.white)
oct_cube.SetWireframe()
myscreen.addActor(oct_cube)
# OCL text
title = camvtk.Text()
title.SetPos( (myscreen.width-350, myscreen.height-30) )
title.SetText("OpenCAMLib " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.addActor(title)
print " render()...",
myscreen.render()
print "done."
lwr.SetFileName(filename)
time.sleep(0.2)
#lwr.Write()
myscreen.iren.Start()
if __name__ == "__main__":
main()
|
davidwusea/opencamlib
|
scripts/ocode/cutsim_test_1.py
|
Python
|
gpl-3.0
| 2,975
|
[
"VTK"
] |
a47f752340b04ec32292b307b0637dc0c39366ed405a87f46f372c90565f721c
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Unit tests for StructureNL (SNL) format
"""
__author__ = "Anubhav Jain"
__credits__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Anubhav Jain"
__email__ = "ajain@lbl.gov"
__date__ = "2/14/13"
import datetime
import unittest2 as unittest
import numpy as np
from pymatgen import Structure, Molecule
from pymatgen.matproj.snl import StructureNL, HistoryNode, Author
class StructureNLCase(unittest.TestCase):
def setUp(self):
# set up a Structure
self.s = Structure(np.eye(3, 3) * 3, ["Fe"], [[0, 0, 0]])
self.s2 = Structure(np.eye(3, 3) * 3, ["Al"], [[0, 0, 0]])
self.mol = Molecule(["He"], [[0, 0, 0]])
# set up BibTeX strings
self.matproj = "@misc{MaterialsProject,\ntitle = {{Materials " \
"Project}},\nurl = {http://www.materialsproject.org}\n}"
self.pmg = "@article{Ong2013,\n author = {Ong, " \
"Shyue Ping and Richards, William Davidson and Jain, " \
"Anubhav and Hautier, Geoffroy and Kocher, " \
"Michael and Cholia, Shreyas and Gunter, Dan and Chevrier," \
" Vincent L. and Persson, Kristin A. and Ceder, Gerbrand}," \
"\n doi = {10.1016/j.commatsci.2012.10.028}," \
"\n issn = {09270256},\n journal = {Computational " \
"Materials Science},\n month = feb,\n pages = {314--319}," \
"\n publisher = {Elsevier B.V.}," \
"\n title = {{Python Materials Genomics (pymatgen): A " \
"robust, open-source python library for materials " \
"analysis}},\n url = {http://linkinghub.elsevier" \
".com/retrieve/pii/S0927025612006295},\n volume = {68}," \
"\n year = {2013}\n}"
repeat = "REPEAT" * 10000
self.superlong = "@misc{SuperLong,\ntitle = {{" + repeat + "}}}"
self.unicode_title = "@misc{Unicode_Title,\ntitle = {{A \u73ab is a rose}}}"
self.junk = "This is junk text, not a BibTeX reference"
# set up remarks
self.remark_fail = ["This is a really long remark that is clearly invalid and must fail, don't you agree? It would be silly to allow remarks that went on forever and ever."]
# set up some authors
self.hulk = [{"name": "Hulk", "email": "hulk@avengers.com"}]
self.america = "Captain America <captainamerica@avengers.com>"
self.thor = [("Thor", "thor@avengers.com")]
self.duo = "Iron Man <ironman@avengers.com>, " \
"Black Widow <blackwidow@avengers.com>"
# set up HistoryNodes
self.valid_node = HistoryNode("DB 1", "www.db1URLgoeshere.com",
{"db1_id": 12424})
self.valid_node2 = {"name": "DB 2", "url": "www.db2URLgoeshere.com",
"description": {"db2_id": 12424}}
self.invalid_node = {"name": "DB 3",
"url": "http://www.db3isnotavalidnode.com"}
def test_authors(self):
a = StructureNL(self.s, self.hulk, references=self.pmg)
self.assertEqual(a.authors[0].name, "Hulk")
self.assertEqual(a.authors[0].email, "hulk@avengers.com")
a = StructureNL(self.s, self.america, references=self.pmg)
self.assertEqual(a.authors[0].name, "Captain America")
self.assertEqual(a.authors[0].email, "captainamerica@avengers.com")
a = StructureNL(self.s, self.thor, references=self.pmg)
self.assertEqual(a.authors[0].name, "Thor")
self.assertEqual(a.authors[0].email, "thor@avengers.com")
a = StructureNL(self.s, self.duo, references=self.pmg)
self.assertEqual(a.authors[0].name, "Iron Man")
self.assertEqual(a.authors[0].email, "ironman@avengers.com")
self.assertEqual(a.authors[1].name, "Black Widow")
self.assertEqual(a.authors[1].email, "blackwidow@avengers.com")
StructureNL(self.s, self.hulk, references=self.pmg)
def test_references(self):
# junk reference should not work
self.assertRaises(ValueError, StructureNL, self.s, self.hulk,
references=self.junk)
# good references should be ok
StructureNL(self.s, self.hulk, references=self.pmg)
# unicode references should work
StructureNL(self.s, self.hulk, references=self.unicode_title)
# multi-line references should be OK
StructureNL(self.s, self.hulk,
references='\n'.join([self.matproj, self.pmg]))
# super long references are bad
self.assertRaises(ValueError, StructureNL, self.s, self.hulk,
references=self.superlong)
def test_historynodes(self):
a = StructureNL(self.s, self.hulk, history=[self.valid_node])
self.assertEqual(a.history[0].name, "DB 1")
self.assertEqual(a.history[0].url, "www.db1URLgoeshere.com")
self.assertEqual(a.history[0].description, {"db1_id": 12424})
a = StructureNL(self.s, self.hulk,
history=[self.valid_node, self.valid_node2])
self.assertEqual(a.history[1].name, "DB 2")
self.assertEqual(a.history[1].url, "www.db2URLgoeshere.com")
self.assertEqual(a.history[1].description, {"db2_id": 12424})
# invalid nodes should not work
self.assertRaises(Exception, StructureNL, self.s, self.hulk,
history=[self.invalid_node])
# too many nodes should not work
self.assertRaises(ValueError, StructureNL, self.s, self.hulk,
history=[self.valid_node] * 1000)
def test_data(self):
# Structure data is OK due to PMGEncoder/Decoder
a = StructureNL(self.s, self.hulk, data={"_structure": self.s2})
self.assertEqual(a.data["_structure"], self.s2,
'Data storage is broken')
self.assertRaises(ValueError, StructureNL, self.s, self.hulk,
data={"bad_key": 1})
def test_remarks(self):
a = StructureNL(self.s, self.hulk, remarks="string format")
self.assertEqual(a.remarks[0], "string format")
self.assertRaises(ValueError, StructureNL, self.s, self.hulk,
remarks=self.remark_fail)
def test_eq(self):
# test basic Equal()
created_at = datetime.datetime.now()
a = StructureNL(self.s, self.hulk, ['test_project'], self.pmg,
['remark1'], {"_my_data": self.s2},
[self.valid_node, self.valid_node2], created_at)
b = StructureNL(self.s, self.hulk, ['test_project'], self.pmg,
['remark1'], {"_my_data": self.s2},
[self.valid_node, self.valid_node2], created_at)
self.assertEqual(a, b, "__eq__() method is broken! false negative")
# change the created at date, now they are no longer equal
created_at = datetime.datetime.now() + datetime.timedelta(days=-1)
c = StructureNL(self.s, self.hulk, ['test_project'], self.pmg,
['remark1'], {"_my_data": self.s2},
[self.valid_node, self.valid_node2], created_at)
self.assertNotEqual(a, c, "__eq__() method is broken! false positive")
# or try a different structure, those should not be equal
d = StructureNL(self.s2, self.hulk, ['test_project'], self.pmg,
['remark1'], {"_my_data": self.s2},
[self.valid_node, self.valid_node2], created_at)
self.assertNotEqual(a, d, "__eq__() method is broken! false positive")
def test_to_from_dict(self):
# no complicated objects in the 'data' or 'nodes' field
a = StructureNL(self.s, self.hulk, ['test_project'], self.pmg,
['remark1'], {"_my_data": "string"},
[self.valid_node, self.valid_node2])
b = StructureNL.from_dict(a.as_dict())
self.assertEqual(a, b)
# complicated objects in the 'data' and 'nodes' field
complicated_node = {"name": "complicated node",
"url": "www.complicatednodegoeshere.com",
"description": {"structure": self.s2}}
a = StructureNL(self.s, self.hulk, ['test_project'], self.pmg,
['remark1'], {"_my_data": {"structure": self.s2}},
[complicated_node, self.valid_node])
b = StructureNL.from_dict(a.as_dict())
self.assertEqual(a, b,
'to/from dict is broken when object embedding is '
'used! Apparently MontyEncoding is broken...')
#Test molecule
molnl = StructureNL(self.mol, self.hulk, references=self.pmg)
b = StructureNL.from_dict(molnl.as_dict())
self.assertEqual(molnl, b)
def test_from_structures(self):
s1 = Structure([[5, 0, 0], [0, 5, 0], [0, 0, 5]], ["Fe"], [[0, 0, 0]])
s2 = Structure([[5, 0, 0], [0, 5, 0], [0, 0, 5]], ["Mn"], [[0, 0, 0]])
remarks = ["unittest"]
authors="Test User <test@materialsproject.com>"
snl_list = StructureNL.from_structures([s1, s2], authors, remarks=remarks)
self.assertEqual(len(snl_list), 2)
snl1 = snl_list[0]
snl2 = snl_list[1]
self.assertEqual(snl1.remarks, remarks)
self.assertEqual(snl2.remarks, remarks)
self.assertEqual(snl1.authors, [Author.parse_author(authors)])
self.assertEqual(snl2.authors, [Author.parse_author(authors)])
if __name__ == '__main__':
unittest.main()
|
aykol/pymatgen
|
pymatgen/matproj/tests/test_snl.py
|
Python
|
mit
| 9,898
|
[
"pymatgen"
] |
421294271be39fd42acec747605fb662f6c5895e806338b88326938f07507513
|
"""
Main class of the phone loop model.
Copyright (C) 2017, Lucas Ondel
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import numpy as np
from bisect import bisect
from itertools import groupby
from scipy.special import logsumexp
from .hmm_utils import create_phone_loop_transition_matrix
from .hmm_utils import create_linear_transition_matrix
from .hmm_utils import forward_backward
from .hmm_utils import viterbi
from .model import EFDStats, DiscreteLatentModel
from ..densities import Dirichlet, NormalGamma, NormalDiag
class PhoneLoop(DiscreteLatentModel):
"""Bayesian Phone Loop.
Bayesian Phone Loop with a Dirichlet prior over the weights.
"""
def create(n_units, n_states, n_comp_per_state, mean, var, random=True,
sample_var=1):
"""Create and initialize a Bayesian Phone Loope Model.
Parameters
----------
n_units : int
Number of acoustic units i.e. phones.
n_states : int
Number of states for each acoustic unit.
n_comp_per_state : int
            Number of components per emission.
mean : numpy.ndarray
Mean of the data set to train on.
var : numpy.ndarray
Variance of the data set to train on.
random : boolean
If True, initialize the mean of the Gaussian posteriors
randomly.
        sample_var : float or None
            Variance used when sampling the initial mean parameters
            (default 1). If None, "var" is used.
Returns
-------
model : :class:`PhoneLoop`
A new phone-loop model.
"""
tot_n_states = n_units * n_states
tot_comp = tot_n_states * n_comp_per_state
latent_prior = Dirichlet(np.ones(n_units))
latent_posterior = Dirichlet(np.ones(n_units))
state_priors = [Dirichlet(np.ones(n_comp_per_state))
for _ in range(tot_n_states)]
state_posteriors = [Dirichlet(np.ones(n_comp_per_state))
for _ in range(tot_n_states)]
priors = []
prior_mean = mean.copy()
prior_var = var.copy()
for i in range(tot_comp):
prior = NormalGamma(
prior_mean,
np.ones_like(mean),
np.ones_like(var),
prior_var,
)
priors.append(prior)
components = []
if sample_var is not None:
s_var = np.ones_like(prior_var) * sample_var
else:
s_var = var
cov = np.diag(s_var)
for i in range(tot_comp):
if random:
s_mean = np.random.multivariate_normal(mean, cov)
else:
s_mean = prior_mean.copy()
posterior = NormalGamma(
s_mean,
np.ones_like(mean),
np.ones_like(var),
prior_var
)
components.append(NormalDiag(priors[i], posterior))
return PhoneLoop(latent_prior, latent_posterior, state_priors,
state_posteriors, components)
def __init__(self, latent_prior, latent_posterior, state_priors,
state_posteriors, components):
DiscreteLatentModel.__init__(self, latent_prior, latent_posterior,
components)
self.n_units = len(latent_prior.natural_params)
self.n_states = len(state_priors) // self.n_units
self.n_comp_per_states = len(state_priors[0].natural_params)
self.state_priors = state_priors
self.state_posteriors = state_posteriors
# Will be initialized later.
self.init_prob = None
self.trans_mat = None
self.init_states = None
self.final_states = None
self.post_update()
def post_update(self):
DiscreteLatentModel.post_update(self)
# Update the states' weights.
self.state_log_weights = np.zeros((self.n_units * self.n_states,
self.n_comp_per_states))
for idx in range(self.n_units * self.n_states):
self.state_log_weights[idx, :] = \
self.state_posteriors[idx].grad_log_partition
# Update the log transition matrix.
unigram_lm = np.exp(self.latent_posterior.grad_log_partition)
unigram_lm /= unigram_lm.sum()
self.init_prob = unigram_lm
self.trans_mat, self.init_states, self.final_states = \
create_phone_loop_transition_matrix(self.n_units, self.n_states,
unigram_lm)
def _get_state_llh(self, s_stats):
# Evaluate the Gaussian log-likelihoods.
exp_llh = self.components_exp_llh(s_stats)
# Reshape the log-likelihood to get the per-state and per
# component log-likelihood.
r_exp_llh = exp_llh.reshape(self.n_units * self.n_states,
self.n_comp_per_states, -1)
# Emission log-likelihood.
c_given_s_llh = r_exp_llh + self.state_log_weights[:, :, np.newaxis]
state_llh = logsumexp(c_given_s_llh, axis=1).T
c_given_s_resps = np.exp(c_given_s_llh - \
state_llh.T[:, np.newaxis, :])
return state_llh, c_given_s_resps
def _units_stats(self, c_llhs, log_alphas, log_betas):
log_units_stats = np.zeros(self.n_units)
norm = logsumexp(log_alphas[-1] + log_betas[-1])
log_A = np.log(self.trans_mat.toarray())
for n_unit in range(self.n_units):
index1 = n_unit * self.n_states + 1
index2 = index1 + 1
log_prob_trans = log_A[index1, index2]
log_q_zn1_zn2 = log_alphas[:-1, index1] + c_llhs[1:, index2] + \
log_prob_trans + log_betas[1:, index2]
log_q_zn1_zn2 -= norm
log_units_stats[n_unit] = logsumexp(log_q_zn1_zn2)
return np.exp(log_units_stats)
def decode(self, s_stats, state_path=False):
state_llh, c_given_s_resps = self._get_state_llh(s_stats)
path = viterbi(
self.init_prob,
self.trans_mat,
self.init_states,
self.final_states,
state_llh
)
if not state_path:
path = [bisect(self.init_states, state) for state in path]
path = [x[0] - 1 for x in groupby(path)]
return path
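    # Usage sketch (illustrative only; `s_stats` stands for the
    # per-frame sufficient statistics this model consumes):
    #
    #     units = ploop.decode(s_stats)                    # unit ids
    #     states = ploop.decode(s_stats, state_path=True)  # HMM states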
def set_linear_graph(self, alignments):
"""Change the recognition structure to match a sequence of the
unit.
Parameters
----------
alignments : list of int
List of acoustic units.
Returns
-------
backup : tuple
Backup of the former parameters of the phone-loop model.
"""
# Save the current parameters.
backup = (
self.n_units,
self.init_prob,
self.trans_mat,
self.init_states,
self.final_states,
self.components
)
# Set the new parameters of the linear recognition model.
self.n_units = len(alignments)
self.init_prob = np.ones(1, dtype=float)
self.trans_mat = create_linear_transition_matrix(len(alignments),
self.n_states)
self.init_states = [0]
self.final_states = [self.trans_mat.shape[0] - 1]
self.components = []
for i in alignments:
for j in range(self.n_states * self.n_comp_per_states):
idx = i * (self.n_states * self.n_comp_per_states) + j
self.components.append(backup[-1][idx])
self.state_log_weights = np.zeros((self.n_units * self.n_states,
self.n_comp_per_states))
DiscreteLatentModel.post_update(self)
return backup
def unset_linear_graph(self, backup):
"""Restore the previous parameters.
Parameters
----------
        backup : tuple
Backup of the parameters to restore.
"""
self.n_units = backup[0]
self.init_prob = backup[1]
self.trans_mat = backup[2]
self.init_states = backup[3]
self.final_states = backup[4]
self.components = backup[5]
self.post_update()
def remap_acc_stats(self, alignments, units_stats, state_stats,
gauss_stats, state_resps, gauss_resps):
"""Remap the accumulated statistics to their original form.
Remap the statistics accumulated with a linear model to match
the form expected by the standard phone-loop model.
Parameters
----------
alignments : list
Units alignments.
units_stats : numpy.ndarray
Acoustic units counts.
state_stats : numpy.ndarray
Count for the per-state mixture elements.
gauss_stats : numpy.ndarray
Gaussian accumulated statistics.
        state_resps : numpy.ndarray
            HMM states' posteriorgram.
        gauss_resps : numpy.ndarray
            Per-Gaussian responsibilities.
Returns
-------
new_units_stats : numpy.ndarray
Acoustic units counts.
new_state_stats : numpy.ndarray
Count for the per-state mixture elements.
new_gauss_stats : numpy.ndarray
Gaussian accumulated statistics.
        new_state_resps : numpy.ndarray
            HMM states' posteriorgram.
        new_gauss_resps : numpy.ndarray
            Per-Gaussian responsibilities.
"""
new_units_stats = np.zeros(self.n_units)
for i, unit in enumerate(alignments):
new_units_stats[unit] += units_stats[i]
new_state_stats = np.zeros((self.n_units * self.n_states,
self.n_comp_per_states))
for i, unit in enumerate(alignments):
for j in range(self.n_states):
idx1 = unit * self.n_states + j
idx2 = i * self.n_states + j
new_state_stats[idx1, :] += state_stats[idx2, :]
new_gauss_stats = np.zeros((len(self.components),
gauss_stats.shape[1]))
for i, unit in enumerate(alignments):
for j in range(self.n_states * self.n_comp_per_states):
idx1 = unit * (self.n_states * self.n_comp_per_states) + j
idx2 = i * (self.n_states * self.n_comp_per_states) + j
new_gauss_stats[idx1, :] += gauss_stats[idx2, :]
new_state_resps = np.zeros((self.n_units * self.n_states,
state_resps.shape[1]))
for i, unit in enumerate(alignments):
for j in range(self.n_states):
idx1 = unit * self.n_states + j
idx2 = i * self.n_states + j
new_state_resps[idx1, :] += state_resps[idx2, :]
n_gauss = self.n_units * self.n_states * self.n_comp_per_states
new_gauss_resps = np.zeros((n_gauss, state_resps.shape[1]))
for i, unit in enumerate(alignments):
for j in range(self.n_states * self.n_comp_per_states):
idx1 = unit * (self.n_states * self.n_comp_per_states) + j
idx2 = i * (self.n_states * self.n_comp_per_states) + j
new_gauss_resps[idx1, :] += gauss_resps[idx2, :]
return new_units_stats, new_state_stats, new_gauss_stats, \
new_state_resps, new_gauss_resps
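    # Remapping sketch: with n_states = 3 and alignments = [4, 4], both
    # linear segments i = 0 and i = 1 fold back onto unit 4, so their rows
    # (i * 3 + j) are summed into rows (4 * 3 + j) of the full model's
    # statistics; repeated units therefore pool their counts.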
# DiscreteLatentModel interface.
# -----------------------------------------------------------------
def kl_div_posterior_prior(self):
"""Kullback-Leibler divergence between prior /posterior.
Returns
-------
kl_div : float
Kullback-Leibler divergence.
"""
retval = DiscreteLatentModel.kl_div_posterior_prior(self)
for idx, post in enumerate(self.state_posteriors):
retval += post.kl_div(self.state_priors[idx])
return retval
def get_posteriors(self, s_stats, ac_scale=1.0, accumulate=False,
alignments=None, gauss_posteriors=False):
        # If the alignments are provided, we use a different recognition
        # structure.
if alignments is not None:
backup = self.set_linear_graph(alignments)
# Compute the per-state expected log-likelihood.
state_llh, c_given_s_resps = self._get_state_llh(s_stats)
        # Forward-backward algorithm.
log_alphas, log_betas = forward_backward(
self.init_prob,
self.trans_mat,
self.init_states,
self.final_states,
ac_scale * state_llh.T
)
# Compute the posteriors.
log_q_Z = (log_alphas + log_betas).T
log_norm = logsumexp(log_q_Z, axis=0)
state_resps = np.exp((log_q_Z - log_norm))
if accumulate:
tot_resps = state_resps[:, np.newaxis, :] * c_given_s_resps
gauss_resps = tot_resps.reshape(-1, tot_resps.shape[-1])
            if self.n_states > 1:
units_stats = self._units_stats(state_llh, log_alphas,
log_betas)
else:
units_stats = state_resps.sum(axis=0)
state_stats = tot_resps.sum(axis=2)
gauss_stats = gauss_resps.dot(s_stats)
            # If we use a linear recognition model, remap the accumulated
            # statistics to the layout of the full phone-loop model.
if alignments is not None:
self.unset_linear_graph(backup)
units_stats, state_stats, gauss_stats, state_resps, \
gauss_resps = \
self.remap_acc_stats(alignments, units_stats, state_stats,
gauss_stats, state_resps, gauss_resps)
acc_stats = EFDStats([units_stats, state_stats, gauss_stats])
if gauss_posteriors:
retval_resps = gauss_resps
else:
retval_resps = state_resps
return retval_resps, log_norm[-1], acc_stats
if alignments is not None:
self.unset_linear_graph(backup)
return state_resps, log_norm[-1]
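    # The two call patterns, sketched (return shapes follow the code above):
    #
    #   resps, llh = model.get_posteriors(s_stats)
    #   resps, llh, acc_stats = model.get_posteriors(s_stats, accumulate=True)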
def natural_grad_update(self, acc_stats, lrate):
"""Natural gradient update."""
# Update unigram language model.
grad = self.latent_prior.natural_params + acc_stats[0]
grad -= self.latent_posterior.natural_params
self.latent_posterior.natural_params = \
self.latent_posterior.natural_params + lrate * grad
# Update the states' weights.
for idx, post in enumerate(self.state_posteriors):
grad = self.state_priors[idx].natural_params + acc_stats[1][idx]
grad -= post.natural_params
post.natural_params = post.natural_params + lrate * grad
# Update Gaussian components.
for idx, stats in enumerate(acc_stats[2]):
comp = self.components[idx]
grad = comp.prior.natural_params + stats
grad -= comp.posterior.natural_params
comp.posterior.natural_params = \
comp.posterior.natural_params + lrate * grad
self.post_update()
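    # All three blocks above apply the same stochastic natural-gradient rule
    # on natural parameters (standard for conjugate exponential families):
    #
    #   eta_post <- eta_post + lrate * (eta_prior + stats - eta_post)
    #
    # so lrate = 1 recovers the closed-form coordinate-ascent VB update.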
# PersistentModel interface implementation.
# -----------------------------------------------------------------
def to_dict(self):
return {
            'class': self.__class__,
'n_units': self.n_units,
'n_states': self.n_states,
'n_comp_per_states': self.n_comp_per_states,
'latent_prior_class': self.latent_prior.__class__,
'latent_prior_data': self.latent_prior.to_dict(),
'latent_posterior_class': self.latent_posterior.__class__,
'latent_posterior_data': self.latent_posterior.to_dict(),
'state_prior_class': self.state_priors[0].__class__,
'state_prior_data': [state_prior.to_dict() for state_prior in
self.state_priors],
'state_posterior_class': self.state_posteriors[0].__class__,
'state_posterior_data': [state_posterior.to_dict()
for state_posterior in
self.state_posteriors],
'components_class': self.components[0].__class__,
'components': [comp.to_dict() for comp in self.components]
}
@classmethod
def load_from_dict(cls, model_data):
model = cls.__new__(model_data['class'])
model.n_units = model_data['n_units']
model.n_states = model_data['n_states']
model.n_comp_per_states = model_data['n_comp_per_states']
latent_prior_cls = model_data['latent_prior_class']
latent_prior_data = model_data['latent_prior_data']
model.latent_prior = \
latent_prior_cls.load_from_dict(latent_prior_data)
latent_posterior_cls = model_data['latent_posterior_class']
latent_posterior_data = model_data['latent_posterior_data']
model.latent_posterior = \
latent_posterior_cls.load_from_dict(latent_posterior_data)
state_prior_cls = model_data['state_prior_class']
state_priors = []
for data in model_data['state_prior_data']:
state_priors.append(
state_prior_cls.load_from_dict(data)
)
model.state_priors = state_priors
state_posterior_cls = model_data['state_posterior_class']
state_posteriors = []
for data in model_data['state_posterior_data']:
state_posteriors.append(
state_posterior_cls.load_from_dict(data)
)
model.state_posteriors = state_posteriors
components = []
components_class = model_data['components_class']
for comp_data in model_data['components']:
comp = components_class.load_from_dict(comp_data)
components.append(comp)
model.components = components
model.post_update()
return model
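    # Round-trip sketch: since to_dict() stores the class object itself under
    # the 'class' key, a restore needs no extra bookkeeping:
    #
    #   data = model.to_dict()
    #   restored = data['class'].load_from_dict(data)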
# -----------------------------------------------------------------
|
amdtkdev/amdtk
|
amdtk/models/phone_loop.py
|
Python
|
mit
| 19,108
|
[
"Gaussian"
] |
e8a557d7fb5b4e59d7f08c5fe0dc90ee1e185dd292c38b38104f96612d8787ff
|
"""
"""
from wheezy.http import WSGIApplication
from wheezy.http.middleware import http_cache_middleware_factory
from wheezy.web.middleware import bootstrap_defaults
from wheezy.web.middleware import http_error_middleware_factory
from wheezy.web.middleware import path_routing_middleware_factory
from config import options
from urls import all_urls
main = WSGIApplication(
middleware=[
bootstrap_defaults(url_mapping=all_urls),
http_cache_middleware_factory,
http_error_middleware_factory,
path_routing_middleware_factory
],
options=options)
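# Note: middleware executes in list order, so (reading the configuration
# above, not a wheezy guarantee) responses may be served from the HTTP cache
# first, errors raised further down are mapped by the error middleware, and
# path routing dispatches to the handlers mapped in all_urls last.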
if __name__ == '__main__':
from wsgiref.handlers import BaseHandler
from wsgiref.simple_server import make_server
try:
print('Visit http://localhost:8080/')
BaseHandler.http_version = '1.1'
make_server('', 8080, main).serve_forever()
except KeyboardInterrupt:
pass
print('\nThanks!')
|
lajto/ww-i18n
|
src/app.py
|
Python
|
gpl-3.0
| 928
|
[
"VisIt"
] |
17dc0c9f35605175aeb9adfee713448348f5b7e8008491cfbc75316e31ff5564
|
#!/usr/bin/python
"""Test of line navigation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"1. Line Down",
["BRAILLE LINE: 'Home News Projects Art Support Development Community'",
" VISIBLE: 'Home News Projects Art Support D', cursor=1",
"SPEECH OUTPUT: 'Home link.'",
"SPEECH OUTPUT: 'News link.'",
"SPEECH OUTPUT: 'Projects link.'",
"SPEECH OUTPUT: 'Art link.'",
"SPEECH OUTPUT: 'Support link.'",
"SPEECH OUTPUT: 'Development link.'",
"SPEECH OUTPUT: 'Community link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: 'live.gnome.org h1 Search $l Titles push button Text push button'",
" VISIBLE: 'live.gnome.org h1 Search $l Tit', cursor=1",
"SPEECH OUTPUT: 'live.gnome.org heading level 1'",
"SPEECH OUTPUT: 'entry Search'",
"SPEECH OUTPUT: 'Titles push button'",
"SPEECH OUTPUT: 'Text push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Line Down",
["BRAILLE LINE: 'Home RecentChanges FindPage HelpContents Orca'",
" VISIBLE: 'Home RecentChanges FindPage Help', cursor=1",
"SPEECH OUTPUT: 'Home link.'",
"SPEECH OUTPUT: 'RecentChanges link.'",
"SPEECH OUTPUT: 'FindPage link.'",
"SPEECH OUTPUT: 'HelpContents link.'",
"SPEECH OUTPUT: 'Orca link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Line Down",
["BRAILLE LINE: 'en Español'",
" VISIBLE: 'en Español', cursor=1",
"SPEECH OUTPUT: 'en Español link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Line Down",
["BRAILLE LINE: 'Home | Download/Installation | Configuration/Use | Accessible Applications | Mailing List \('",
" VISIBLE: 'Home | Download/Installation | C', cursor=1",
"SPEECH OUTPUT: 'Home link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Download/Installation link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Configuration/Use link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Accessible Applications link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Mailing List link.'",
"SPEECH OUTPUT: '('"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Line Down",
["BRAILLE LINE: 'Archives\) | FAQ | DocIndex'",
" VISIBLE: 'Archives\) | FAQ | DocIndex', cursor=1",
"SPEECH OUTPUT: 'Archives link.'",
"SPEECH OUTPUT: ') |'",
"SPEECH OUTPUT: 'FAQ link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'DocIndex link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"7. Line Down",
["BRAILLE LINE: 'Welcome to Orca! h1'",
" VISIBLE: 'Welcome to Orca! h1', cursor=1",
"SPEECH OUTPUT: 'Welcome to Orca! heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"8. Line Down",
["BRAILLE LINE: 'Orca Logo'",
" VISIBLE: 'Orca Logo', cursor=1",
"SPEECH OUTPUT: 'Orca Logo link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. Line Down",
["BRAILLE LINE: 'HOT HOT HOT: Notes on access to Firefox 3.0'",
" VISIBLE: 'HOT HOT HOT: Notes on access to ', cursor=1",
"SPEECH OUTPUT: 'HOT HOT HOT: Notes on'",
"SPEECH OUTPUT: 'access to Firefox 3.0 link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"10. Line Down",
["BRAILLE LINE: 'Contents'",
" VISIBLE: 'Contents', cursor=1",
"SPEECH OUTPUT: 'Contents'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"11. Line Down",
["BRAILLE LINE: '1. Welcome to Orca!'",
" VISIBLE: '1. Welcome to Orca!', cursor=1",
"SPEECH OUTPUT: '1.'",
"SPEECH OUTPUT: 'Welcome to Orca! link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"12. Line Down",
["BRAILLE LINE: '2. About'",
" VISIBLE: '2. About', cursor=1",
"SPEECH OUTPUT: '2.'",
"SPEECH OUTPUT: 'About link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"13. Line Down",
["BRAILLE LINE: '3. Audio Guides'",
" VISIBLE: '3. Audio Guides', cursor=1",
"SPEECH OUTPUT: '3.'",
"SPEECH OUTPUT: 'Audio Guides link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"14. Line Down",
["BRAILLE LINE: '4. Download/Installation'",
" VISIBLE: '4. Download/Installation', cursor=1",
"SPEECH OUTPUT: '4.'",
"SPEECH OUTPUT: 'Download/Installation link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"15. Line Down",
["BRAILLE LINE: '5. Configuration/Use'",
" VISIBLE: '5. Configuration/Use', cursor=1",
"SPEECH OUTPUT: '5.'",
"SPEECH OUTPUT: 'Configuration/Use link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"16. Line Down",
["BRAILLE LINE: '6. Accessible Applications'",
" VISIBLE: '6. Accessible Applications', cursor=1",
"SPEECH OUTPUT: '6.'",
"SPEECH OUTPUT: 'Accessible Applications link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"17. Line Down",
["BRAILLE LINE: '7. How Can I Help?'",
" VISIBLE: '7. How Can I Help?', cursor=1",
"SPEECH OUTPUT: '7.'",
"SPEECH OUTPUT: 'How Can I Help? link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"18. Line Down",
["BRAILLE LINE: '8. More Information'",
" VISIBLE: '8. More Information', cursor=1",
"SPEECH OUTPUT: '8.'",
"SPEECH OUTPUT: 'More Information link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"19. Line Down",
["BRAILLE LINE: 'About h1'",
" VISIBLE: 'About h1', cursor=1",
"SPEECH OUTPUT: 'About heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"20. Line Down",
["BRAILLE LINE: 'Orca is a free, open source, flexible, extensible, and'",
" VISIBLE: 'Orca is a free, open source, fle', cursor=1",
"SPEECH OUTPUT: 'Orca is a free, open source, flexible, extensible, and'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"21. Line Down",
["BRAILLE LINE: 'powerful assistive technology for people with visual'",
" VISIBLE: 'powerful assistive technology fo', cursor=1",
"SPEECH OUTPUT: 'powerful assistive technology for people with visual'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"22. Line Down",
["BRAILLE LINE: 'impairments. Using various combinations of speech'",
" VISIBLE: 'impairments. Using various combi', cursor=1",
"SPEECH OUTPUT: 'impairments. Using various combinations of speech'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"23. Line Down",
["BRAILLE LINE: 'synthesis, braille, and magnification, Orca helps provide'",
" VISIBLE: 'synthesis, braille, and magnific', cursor=1",
"SPEECH OUTPUT: 'synthesis, braille, and magnification, Orca helps provide'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"24. Line Down",
["BRAILLE LINE: 'access to applications and toolkits that support the AT-SPI'",
" VISIBLE: 'access to applications and toolk', cursor=1",
"SPEECH OUTPUT: 'access to applications and toolkits that support the AT-SPI'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"25. Line Down",
["BRAILLE LINE: '(e.g., the GNOME desktop). The development of Orca has'",
" VISIBLE: '(e.g., the GNOME desktop). The d', cursor=1",
"SPEECH OUTPUT: '(e.g., the GNOME desktop). The development of Orca has'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"26. Line Down",
["BRAILLE LINE: 'been led by the Accessibility Program Office of Sun'",
" VISIBLE: 'been led by the Accessibility Pr', cursor=1",
"SPEECH OUTPUT: 'been led by the'",
"SPEECH OUTPUT: 'Accessibility Program Office of Sun'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"27. Line Down",
["BRAILLE LINE: 'Microsystems, Inc. with contributions from many'",
" VISIBLE: 'Microsystems, Inc. with contribu', cursor=1",
"SPEECH OUTPUT: 'Microsystems, Inc.'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'with'",
"SPEECH OUTPUT: 'contributions from many'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"28. Line Down",
["BRAILLE LINE: 'community members.'",
" VISIBLE: 'community members.', cursor=1",
"SPEECH OUTPUT: 'community members'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"29. Line Down",
["BRAILLE LINE: 'The complete list of work to do, including bugs and feature requests, along with known'",
" VISIBLE: 'The complete list of work to do,', cursor=1",
"SPEECH OUTPUT: 'The complete list of work to do, including bugs and feature requests, along with known'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"30. Line Down",
["BRAILLE LINE: 'problems in other components, is maintained in Bugzilla \(please see our notes on how we'",
" VISIBLE: 'problems in other components, is', cursor=1",
"SPEECH OUTPUT: 'problems in other components, is maintained in'",
"SPEECH OUTPUT: 'Bugzilla link.'",
"SPEECH OUTPUT: '\(please see our'",
"SPEECH OUTPUT: 'notes on how we'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"31. Line Down",
["BRAILLE LINE: 'use Bugzilla\).'",
" VISIBLE: 'use Bugzilla\).', cursor=1",
"SPEECH OUTPUT: 'use Bugzilla'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '\).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"32. Line Down",
["BRAILLE LINE: 'Please join and participate on the Orca mailing list (archives): it's a helpful, kind, and'",
" VISIBLE: 'Please join and participate on t', cursor=1",
"SPEECH OUTPUT: 'Please join and participate on the'",
"SPEECH OUTPUT: 'Orca mailing list link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'archives link.'",
"SPEECH OUTPUT: '): it's a helpful, kind, and'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"33. Line Down",
["BRAILLE LINE: 'productive environment composed of users and developers.'",
" VISIBLE: 'productive environment composed ', cursor=1",
"SPEECH OUTPUT: 'productive environment composed of users and developers.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"34. Line Down",
["BRAILLE LINE: 'Audio Guides h1'",
" VISIBLE: 'Audio Guides h1', cursor=1",
"SPEECH OUTPUT: 'Audio Guides heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"35. Line Down",
["BRAILLE LINE: 'Darragh Ó Héiligh has created several audio guides for Orca. This is a fantastic'",
" VISIBLE: 'Darragh Ó Héiligh has created se', cursor=1",
"SPEECH OUTPUT: 'Darragh Ó Héiligh link.'",
"SPEECH OUTPUT: 'has created several audio guides for Orca. This is a fantastic'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"36. Line Down",
["BRAILLE LINE: 'contribution (THANKS!)!!! The audio guides can be found at http://www.digitaldarragh.com'",
" VISIBLE: 'contribution (THANKS!)!!! The au', cursor=1",
"SPEECH OUTPUT: 'contribution (THANKS!)!!! The audio guides can be found at'",
"SPEECH OUTPUT: 'http://www.digitaldarragh.com'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"37. Line Down",
["BRAILLE LINE: '/linuxat.asp and include the following:'",
" VISIBLE: '/linuxat.asp and include the fol', cursor=1",
"SPEECH OUTPUT: '/linuxat.asp'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'and include the following:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"38. Line Down",
["BRAILLE LINE: '• Walk through of the installation of Ubuntu 7.4. Very helpful tutorial'",
" VISIBLE: '• Walk through of the installati', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Walk through of the installation of Ubuntu 7.4. Very helpful tutorial link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"39. Line Down",
["BRAILLE LINE: '• Review of Fedora 7 and the Orca screen reader for the Gnome graphical desktop'",
" VISIBLE: '• Review of Fedora 7 and the Orc', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Review of Fedora 7 and the Orca screen reader for the Gnome graphical desktop link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"40. Line Down",
["BRAILLE LINE: '• Guide to installing the latest versions of Firefox and Orca'",
" VISIBLE: '• Guide to installing the latest', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Guide to installing the latest versions of Firefox and Orca link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"41. Line Down",
["BRAILLE LINE: 'Download/Installation h1'",
" VISIBLE: 'Download/Installation h1', cursor=1",
"SPEECH OUTPUT: 'Download/Installation heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"42. Line Down",
["BRAILLE LINE: 'As of GNOME 2.16, Orca is a part of the GNOME platform. As a result, Orca is already'",
" VISIBLE: 'As of GNOME 2.16, Orca is a part', cursor=1",
"SPEECH OUTPUT: 'As of GNOME 2.16, Orca is a part of the GNOME platform. As a result, Orca is already'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"43. Line Down",
["BRAILLE LINE: 'provided by default on a number of operating system distributions, including Open Solaris'",
" VISIBLE: 'provided by default on a number ', cursor=1",
"SPEECH OUTPUT: 'provided by default on a number of operating system distributions, including'",
"SPEECH OUTPUT: 'Open Solaris link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"44. Line Down",
["BRAILLE LINE: 'and Ubuntu.'",
" VISIBLE: 'and Ubuntu.', cursor=1",
"SPEECH OUTPUT: 'and'",
"SPEECH OUTPUT: 'Ubuntu link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"45. Line Down",
["BRAILLE LINE: 'Please also refer to the Download/Installation page for detailed information on various'",
" VISIBLE: 'Please also refer to the Downloa', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Download/Installation page link.'",
"SPEECH OUTPUT: 'for detailed information on various'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"46. Line Down",
["BRAILLE LINE: 'distributions as well as installing Orca directly from source.'",
" VISIBLE: 'distributions as well as install', cursor=1",
"SPEECH OUTPUT: 'distributions as well as installing Orca directly from source.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"47. Line Down",
["BRAILLE LINE: 'Configuration/Use h1'",
" VISIBLE: 'Configuration/Use h1', cursor=1",
"SPEECH OUTPUT: 'Configuration/Use heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"48. Line Down",
["BRAILLE LINE: 'The command to run orca is orca. You can enter this command by pressing Alt+F2 when'",
" VISIBLE: 'The command to run orca is orca.', cursor=1",
"SPEECH OUTPUT: 'The command to run orca is orca. You can enter this command by pressing Alt+F2 when'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"49. Line Down",
["BRAILLE LINE: 'logged in, waiting for a second or so, then typing orca and pressing return. Orca is'",
" VISIBLE: 'logged in, waiting for a second ', cursor=1",
"SPEECH OUTPUT: 'logged in, waiting for a second or so, then typing orca and pressing return. Orca is'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"50. Line Down",
["BRAILLE LINE: 'designed to present information as you navigate the desktop using the built-in navigation'",
" VISIBLE: 'designed to present information ', cursor=1",
"SPEECH OUTPUT: 'designed to present information as you navigate the desktop using the'",
"SPEECH OUTPUT: 'built-in navigation'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"51. Line Down",
["BRAILLE LINE: 'mechanisms of GNOME. These navigation mechanisms are consistent across most'",
" VISIBLE: 'mechanisms of GNOME. These navig', cursor=1",
"SPEECH OUTPUT: 'mechanisms of GNOME'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '. These navigation mechanisms are consistent across most'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"52. Line Down",
["BRAILLE LINE: 'desktop applications.'",
" VISIBLE: 'desktop applications.', cursor=1",
"SPEECH OUTPUT: 'desktop applications.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"53. Line Down",
["BRAILLE LINE: 'You may sometimes wish to control Orca itself, such as bringing up the Orca Configuration'",
" VISIBLE: 'You may sometimes wish to contro', cursor=1",
"SPEECH OUTPUT: 'You may sometimes wish to control Orca itself, such as bringing up the'",
"SPEECH OUTPUT: 'Orca Configuration'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"54. Line Down",
["BRAILLE LINE: 'GUI (accessed by pressing Insert+Space when Orca is running) and for using flat review'",
" VISIBLE: 'GUI (accessed by pressing Insert', cursor=1",
"SPEECH OUTPUT: 'GUI'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '(accessed by pressing Insert+Space when Orca is running) and for using flat review'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"55. Line Down",
["BRAILLE LINE: 'mode to examine a window. Refer to Orca Keyboard Commands (Laptop Layout) for more'",
" VISIBLE: 'mode to examine a window. Refer ', cursor=1",
"SPEECH OUTPUT: 'mode to examine a window. Refer to'",
"SPEECH OUTPUT: 'Orca Keyboard Commands link.'",
"SPEECH OUTPUT: '(Laptop Layout) link.'",
"SPEECH OUTPUT: 'for more'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"56. Line Down",
["BRAILLE LINE: 'information on Orca-specific keyboard commands. The Orca Configuration GUI also'",
" VISIBLE: 'information on Orca-specific key', cursor=1",
"SPEECH OUTPUT: 'information on Orca-specific keyboard commands. The'",
"SPEECH OUTPUT: 'Orca Configuration GUI link.'",
"SPEECH OUTPUT: 'also'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"57. Line Down",
["BRAILLE LINE: 'includes a \"Key Bindings\" tab that allows you to get a complete list of Orca key bindings.'",
" VISIBLE: 'includes a \"Key Bindings\" tab th', cursor=1",
"SPEECH OUTPUT: 'includes a \"Key Bindings\" tab that allows you to get a complete list of Orca key bindings.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"58. Line Down",
["BRAILLE LINE: 'Please also refer to the Configuration/Use page for detailed information.'",
" VISIBLE: 'Please also refer to the Configu', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Configuration/Use page link.'",
"SPEECH OUTPUT: 'for detailed information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"59. Line Down",
["BRAILLE LINE: 'Accessible Applications h1'",
" VISIBLE: 'Accessible Applications h1', cursor=1",
"SPEECH OUTPUT: 'Accessible Applications heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"60. Line Down",
["BRAILLE LINE: 'Orca is designed to work with applications and toolkits that support the assistive'",
" VISIBLE: 'Orca is designed to work with ap', cursor=1",
"SPEECH OUTPUT: 'Orca is designed to work with applications and toolkits that support the assistive'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"61. Line Down",
["BRAILLE LINE: 'technology service provider interface (AT-SPI). This includes the GNOME desktop and its'",
" VISIBLE: 'technology service provider inte', cursor=1",
"SPEECH OUTPUT: 'technology service provider interface (AT-SPI). This includes the GNOME desktop and its'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"62. Line Down",
["BRAILLE LINE: 'applications, OpenOffice, Firefox, and the Java platform. Some applications work better'",
" VISIBLE: 'applications, OpenOffice, Firefo', cursor=1",
"SPEECH OUTPUT: 'applications,'",
"SPEECH OUTPUT: 'OpenOffice link.'",
"SPEECH OUTPUT: ', Firefox, and the Java platform. Some applications work better'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"63. Line Down",
["BRAILLE LINE: 'than others, however, and the Orca community continually works to provide compelling'",
" VISIBLE: 'than others, however, and the Or', cursor=1",
"SPEECH OUTPUT: 'than others, however, and the Orca community continually works to provide compelling'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"64. Line Down",
["BRAILLE LINE: 'access to more and more applications.'",
" VISIBLE: 'access to more and more applicat', cursor=1",
"SPEECH OUTPUT: 'access to more and more applications.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"65. Line Down",
["BRAILLE LINE: 'On the Accessible Applications page, you will find a growing list of information regarding'",
" VISIBLE: 'On the Accessible Applications p', cursor=1",
"SPEECH OUTPUT: 'On the'",
"SPEECH OUTPUT: 'Accessible Applications page link.'",
"SPEECH OUTPUT: ', you will find a growing list of information regarding'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"66. Line Down",
["BRAILLE LINE: 'various applications that can be accessed with Orca as well as tips and tricks for using'",
" VISIBLE: 'various applications that can be', cursor=1",
"SPEECH OUTPUT: 'various applications that can be accessed with Orca as well as tips and tricks for using'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"67. Line Down",
["BRAILLE LINE: 'them. The list is not to be a conclusive list of all applications. Rather, the goal is to provide a'",
" VISIBLE: 'them. The list is not to be a co', cursor=1",
"SPEECH OUTPUT: 'them. The list is not to be a conclusive list of all applications. Rather, the goal is to provide a'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"68. Line Down",
["BRAILLE LINE: 'repository within which users can share experiences regarding applications they have'",
" VISIBLE: 'repository within which users ca', cursor=1",
"SPEECH OUTPUT: 'repository within which users can share experiences regarding applications they have'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"69. Line Down",
["BRAILLE LINE: 'tested.'",
" VISIBLE: 'tested.', cursor=1",
"SPEECH OUTPUT: 'tested.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"70. Line Down",
["BRAILLE LINE: 'See also the Application Specific Settings page for how to configure settings specific to an'",
" VISIBLE: 'See also the Application Specifi', cursor=1",
"SPEECH OUTPUT: 'See also the'",
"SPEECH OUTPUT: 'Application Specific Settings link.'",
"SPEECH OUTPUT: 'page for how to configure settings specific to an'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"71. Line Down",
["BRAILLE LINE: 'application.'",
" VISIBLE: 'application.', cursor=1",
"SPEECH OUTPUT: 'application.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"72. Line Down",
["BRAILLE LINE: 'Please also refer to the Accessible Applications page for detailed information.'",
" VISIBLE: 'Please also refer to the Accessi', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Accessible Applications page link.'",
"SPEECH OUTPUT: 'for detailed information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"73. Line Down",
["BRAILLE LINE: 'How Can I Help? h1'",
" VISIBLE: 'How Can I Help? h1', cursor=1",
"SPEECH OUTPUT: 'How Can I Help? heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"74. Line Down",
["BRAILLE LINE: 'There's a bunch you can do! Please refer to the How Can I Help page for detailed'",
" VISIBLE: 'There's a bunch you can do! Plea', cursor=1",
"SPEECH OUTPUT: 'There's a bunch you can do! Please refer to the'",
"SPEECH OUTPUT: 'How Can I Help page link.'",
"SPEECH OUTPUT: 'for detailed'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"75. Line Down",
["BRAILLE LINE: 'information.'",
" VISIBLE: 'information.', cursor=1",
"SPEECH OUTPUT: 'information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"76. Line Down",
["BRAILLE LINE: 'More Information h1'",
" VISIBLE: 'More Information h1', cursor=1",
"SPEECH OUTPUT: 'More Information heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"77. Line Down",
["BRAILLE LINE: '• Frequently Asked Questions: FAQ'",
" VISIBLE: '• Frequently Asked Questions: FA', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Frequently Asked Questions:'",
"SPEECH OUTPUT: 'FAQ link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"78. Line Down",
["BRAILLE LINE: '• Mailing list: orca-list@gnome.org (Archives)'",
" VISIBLE: '• Mailing list: orca-list@gnome.', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Mailing list:'",
"SPEECH OUTPUT: 'orca-list@gnome.org link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'Archives link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"79. Line Down",
["BRAILLE LINE: '• Bug database: GNOME Bug Tracking System (Bugzilla) (current bug list)'",
" VISIBLE: '• Bug database: GNOME Bug Tracki', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Bug database:'",
"SPEECH OUTPUT: 'GNOME Bug Tracking System (Bugzilla) link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'current bug list link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"80. Line Down",
["BRAILLE LINE: '• Design documents: Orca Documentation Series'",
" VISIBLE: '• Design documents: Orca Documen', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Design documents:'",
"SPEECH OUTPUT: 'Orca Documentation Series link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"81. Line Down",
["BRAILLE LINE: '• Dive Into Python, Mark Pilgrim'",
" VISIBLE: '• Dive Into Python, Mark Pilgrim', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Dive Into Python, Mark Pilgrim link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"82. Line Down",
["BRAILLE LINE: '• Python in a Nutshell, Alex Martelli'",
" VISIBLE: '• Python in a Nutshell, Alex Mar', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Python in a Nutshell, Alex Martelli link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"83. Line Down",
["BRAILLE LINE: '• Python Pocket Reference, Mark Lutz'",
" VISIBLE: '• Python Pocket Reference, Mark ', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Python Pocket Reference, Mark Lutz link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"84. Line Down",
["BRAILLE LINE: 'separator'",
" VISIBLE: 'separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"85. Line Down",
["BRAILLE LINE: 'The information on this page and the other Orca-related pages on this site are distributed'",
" VISIBLE: 'The information on this page and', cursor=1",
"SPEECH OUTPUT: 'The information on this page and the other Orca-related pages on this site are distributed'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"86. Line Down",
["BRAILLE LINE: 'in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied'",
" VISIBLE: 'in the hope that it will be usef', cursor=1",
"SPEECH OUTPUT: 'in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"87. Line Down",
["BRAILLE LINE: 'warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'",
" VISIBLE: 'warranty of MERCHANTABILITY or F', cursor=1",
"SPEECH OUTPUT: 'warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"88. Line Down",
["BRAILLE LINE: 'separator'",
" VISIBLE: 'separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"89. Line Down",
["BRAILLE LINE: 'CategoryAccessibility'",
" VISIBLE: 'CategoryAccessibility', cursor=1",
"SPEECH OUTPUT: 'CategoryAccessibility link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"90. Line Down",
["BRAILLE LINE: 'Orca (last edited 2007-12-07 22:09:22 by WillieWalker)'",
" VISIBLE: 'Orca (last edited 2007-12-07 22:', cursor=1",
"SPEECH OUTPUT: 'Orca (last edited 2007-12-07 22:09:22 by'",
"SPEECH OUTPUT: 'WillieWalker link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"91. Line Down",
["BRAILLE LINE: 'User h3'",
" VISIBLE: 'User h3', cursor=1",
"SPEECH OUTPUT: 'User heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"92. Line Down",
["BRAILLE LINE: 'Login'",
" VISIBLE: 'Login', cursor=1",
"SPEECH OUTPUT: 'Login link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"93. Line Down",
["BRAILLE LINE: 'Page h3'",
" VISIBLE: 'Page h3', cursor=1",
"SPEECH OUTPUT: 'Page heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"94. Line Down",
["BRAILLE LINE: 'Immutable Page'",
" VISIBLE: 'Immutable Page', cursor=1",
"SPEECH OUTPUT: 'Immutable Page.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"95. Line Down",
["BRAILLE LINE: 'Info'",
" VISIBLE: 'Info', cursor=1",
"SPEECH OUTPUT: 'Info link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"96. Line Down",
["BRAILLE LINE: 'Attachments'",
" VISIBLE: 'Attachments', cursor=1",
"SPEECH OUTPUT: 'Attachments link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"97. Line Down",
["BRAILLE LINE: 'More Actions: combo box'",
" VISIBLE: 'More Actions: combo box', cursor=1",
"SPEECH OUTPUT: 'More Actions: combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"98. Line Down",
["BRAILLE LINE: 'GNOME World h3'",
" VISIBLE: 'GNOME World h3', cursor=1",
"SPEECH OUTPUT: 'GNOME World heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"99. Line Down",
["BRAILLE LINE: 'Wide h3'",
" VISIBLE: 'Wide h3', cursor=1",
"SPEECH OUTPUT: 'Wide heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"100. Line Down",
["BRAILLE LINE: 'GnomeWorldWide image'",
" VISIBLE: 'GnomeWorldWide image', cursor=1",
"SPEECH OUTPUT: 'GnomeWorldWide image link'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"101. Line Down",
["BRAILLE LINE: 'Copyright \xa9 2005, 2006, 2007 The GNOME Project.'",
" VISIBLE: 'Copyright \xa9 2005, 2006, 2007 The', cursor=1",
"SPEECH OUTPUT: 'Copyright \xa9 2005, 2006, 2007'",
"SPEECH OUTPUT: 'The GNOME Project link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"102. Line Down",
["BRAILLE LINE: 'Hosted by Red Hat.'",
" VISIBLE: 'Hosted by Red Hat.', cursor=1",
"SPEECH OUTPUT: 'Hosted by'",
"SPEECH OUTPUT: 'Red Hat link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
pvagner/orca
|
test/keystrokes/firefox/line_nav_wiki_down.py
|
Python
|
lgpl-2.1
| 41,454
|
[
"ORCA"
] |
5a97e78cf3fe43e68438fbc50ebceb5f1c026d95c3a0db5849101828d8272329
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: George Booth
# Qiming Sun <osirpt.sun@gmail.com>
#
import os, sys
from functools import reduce
import numpy
import pyscf.tools
import pyscf.lib.logger as logger
import pyscf.ao2mo
import pyscf.symm
import pyscf.fci
import pyscf.symm.param as param
from subprocess import call
try:
from pyscf.fciqmcscf import settings
except ImportError:
msg = '''settings.py not found for module fciqmcscf. Please create %s
''' % os.path.join(os.path.dirname(__file__), 'settings.py')
sys.stderr.write(msg)
try:
import settings
except ImportError:
import os, sys
msg = '''settings.py not found. Please create %s
''' % os.path.join(os.path.dirname(__file__), 'settings.py')
sys.stderr.write(msg)
IRREP_MAP = {'D2h': (1, # Ag
4, # B1g
6, # B2g
7, # B3g
8, # Au
5, # B1u
3, # B2u
2), # B3u
'C2v': (1, # A1
4, # A2
2, # B1
3), # B2
'C2h': (1, # Ag
4, # Bg
2, # Au
3), # Bu
'D2' : (1, # A
4, # B1
3, # B2
2), # B3
'Cs' : (1, # A'
2), # A"
'C2' : (1, # A
2), # B
'Ci' : (1, # Ag
2), # Au
'C1' : (1,)}
class FCIQMCCI(object):
def __init__(self, mol):
self.mol = mol
self.verbose = mol.verbose
self.stdout = mol.stdout
self.executable = settings.FCIQMCEXE
# Shouldn't need scratch dir settings.BLOCKSCRATCHDIR.
self.scratchDirectory = ''
self.generate_neci_input = True
self.integralFile = "FCIDUMP"
self.configFile = "neci.inp"
self.outputFileRoot = "neci.out"
self.outputFileCurrent = self.outputFileRoot
self.maxwalkers = 10000
self.maxIter = -1
self.InitShift = 0.1
self.RDMSamples = 5000
self.restart = False
self.time = 10
self.tau = -1.0
self.seed = 7
self.AddtoInit = 3
self.orbsym = []
self.pg_symmetry = 1
self.state_weights = [1.0]
# This is the number of spin orbitals to freeze in the NECI calculation.
# Note that if you do this for a CASSCF calculation, it will freeze in
# the active space.
self.nfreezecore = 0
self.nfreezevirt = 0
self.system_options = ''
self.calc_options = ''
self.logging_options = ''
if mol.symmetry:
self.groupname = mol.groupname
else:
self.groupname = None
self._keys = set(self.__dict__.keys())
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('******** FCIQMC options ********')
log.info('Number of walkers = %s', self.maxwalkers)
log.info('Maximum number of iterations = %d', self.maxIter)
def make_rdm12(self, fcivec, norb, nelec, link_index=None, **kwargs):
if isinstance(nelec, (int, numpy.integer)):
nelectrons = nelec
else:
nelectrons = nelec[0]+nelec[1]
nstates = len(self.state_weights)
# If norm != 1 then the state weights will need normalising.
norm = sum(self.state_weights)
two_pdm = numpy.zeros( (norb, norb, norb, norb) )
for irdm in range(nstates):
if self.state_weights[irdm] != 0.0:
dm_filename = 'spinfree_TwoRDM.' + str(irdm+1)
temp_dm = read_neci_two_pdm(self, dm_filename, norb,
self.scratchDirectory)
two_pdm += (self.state_weights[irdm]/norm)*temp_dm
one_pdm = one_from_two_pdm(two_pdm, nelectrons)
return one_pdm, two_pdm
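    # State-averaging sketch of the rule applied above:
    #
    #   Gamma_2 = sum_i (w_i / sum_j w_j) * Gamma_2^{(i)}
    #
    # with the 1-RDM contracted out of the averaged 2-RDM afterwards, so the
    # returned pair is consistent by construction.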
def make_rdm1(self, fcivec, norb, nelec, link_index=None, **kwargs):
return self.make_rdm12(fcivec, norb, nelec, link_index, **kwargs)[0]
def kernel(self, h1e, eri, norb, nelec, fci_restart=None, ecore=0, **kwargs):
if fci_restart is None:
fci_restart = self.restart
if isinstance(nelec, (int, numpy.integer)):
neleca = nelec//2 + nelec%2
nelecb = nelec - neleca
else:
neleca, nelecb = nelec
write_integrals_file(h1e, eri, norb, neleca, nelecb, self, ecore)
if self.generate_neci_input:
write_fciqmc_config_file(self, neleca, nelecb, fci_restart)
if self.verbose >= logger.DEBUG1:
in_file = self.configFile
logger.debug1(self, 'FCIQMC Input file')
logger.debug1(self, open(in_file, 'r').read())
execute_fciqmc(self)
if self.verbose >= logger.DEBUG1:
out_file = self.outputFileCurrent
with open(out_file) as f:
logger.debug1(self, f.read())
rdm_energy = read_energy(self)
return rdm_energy, None
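    # Sketch of the intended use as an external solver (hedged; follows the
    # usual pyscf pattern for swapping fcisolver, and assumes a working NECI
    # binary configured through settings.FCIQMCEXE):
    #
    #   mc = pyscf.mcscf.CASSCF(mf, norb, (neleca, nelecb))
    #   mc.fcisolver = FCIQMCCI(mol)
    #   e_tot = mc.kernel()[0]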
def calc_energy_from_rdms(mol, mo_coeff, one_rdm, two_rdm):
'''From the full density matrices, calculate the energy.
Args:
mol : An instance of :class:`Mole`
The molecule to calculate
mo_coeff: ndarray
The MO orbitals in which the RDMs are calculated
one_rdm: ndarray
The 1RDM
two_rdm: ndarray
The 2RDM as RDM_ijkl = < a^+_is a^+_kt a_lt a_js >.
'''
nmo = mo_coeff.shape[1]
eri = pyscf.ao2mo.full(mol, mo_coeff, verbose=0)
eri = pyscf.ao2mo.restore(1, eri, nmo)
t = mol.intor_symmetric('cint1e_kin_sph')
v = mol.intor_symmetric('cint1e_nuc_sph')
h = reduce(numpy.dot, (mo_coeff.T, t+v, mo_coeff))
two_e = numpy.einsum('ijkl,ijkl->', eri, two_rdm) * 0.5
one_e = numpy.einsum('ij,ij->', h, one_rdm)
return two_e + one_e + mol.energy_nuc()
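# The contraction above is the standard RDM energy expression, assuming the
# chemists'-notation ordering produced by pyscf.ao2mo.restore(1, ...):
#
#   E = sum_ij h_ij gamma_ij + (1/2) sum_ijkl (ij|kl) Gamma_ijkl + E_nuc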
def run_standalone(fciqmcci, scf_obj, orbs=None, restart=None):
'''Run a standalone NECI calculation for the molecule listed in the
FCIQMCCI object. The basis to run this calculation in is given by the
orbs array.
Args:
fciqmcci : an instance of :class:`FCIQMCCI`
FCIQMC calculation containing parameters of NECI calculation to
run.
mo_coeff : ndarray
Orbital coefficients. Each column is one orbital.
restart : bool
Is this a restarted NECI calculation?
Returns:
rdm_energy : float
Final RDM energy obtained from the NECI output file.
'''
if orbs is None:
orbs = scf_obj.mo_coeff
tol = 1e-9
if isinstance(orbs,tuple):
# Assume UHF
print('uhf orbitals detected')
nmo = orbs[0].shape[1]
tUHF = True
else:
print('rhf orbitals detected')
nmo = orbs.shape[1]
tUHF = False
nelec = fciqmcci.mol.nelectron
fciqmcci.dump_flags(verbose=5)
if fciqmcci.mol.symmetry:
        if fciqmcci.groupname == 'Dooh':
            logger.info(fciqmcci, 'Lower symmetry from Dooh to D2h')
            raise RuntimeError('Lower symmetry from Dooh to D2h')
        elif fciqmcci.groupname == 'Coov':
            logger.info(fciqmcci, 'Lower symmetry from Coov to C2v')
            raise RuntimeError('Lower symmetry from Coov to C2v')
else:
# We need the AO basis overlap matrix to calculate the
# symmetries.
if tUHF:
fciqmcci.orbsym = pyscf.symm.label_orb_symm(fciqmcci.mol,
fciqmcci.mol.irrep_name, fciqmcci.mol.symm_orb,
orbs[0])
tmp_orblist = fciqmcci.orbsym.tolist()
tmp_orblist += pyscf.symm.label_orb_symm(fciqmcci.mol,
fciqmcci.mol.irrep_name, fciqmcci.mol.symm_orb,
orbs[1]).tolist()
fciqmcci.orbsym = numpy.array(tmp_orblist)
orbsym = [param.IRREP_ID_TABLE[fciqmcci.groupname][i]+1 for
i in fciqmcci.orbsym]
else:
fciqmcci.orbsym = pyscf.symm.label_orb_symm(fciqmcci.mol,
fciqmcci.mol.irrep_name, fciqmcci.mol.symm_orb,
orbs)
orbsym = [param.IRREP_ID_TABLE[fciqmcci.groupname][i]+1 for
i in fciqmcci.orbsym]
# pyscf.tools.fcidump.write_head(fout, nmo, nelec,
# fciqmcci.mol.spin, orbsym)
else:
orbsym = []
# eri = pyscf.ao2mo.outcore.full(fciqmcci.mol, orbs, verbose=0)
# Lookup and return the relevant 1-electron integrals, and print out
# the FCIDUMP file.
if tUHF:
write_uhf_integrals_neci(fciqmcci,scf_obj,nmo,nelec,orbs,orbsym,tol=tol)
else:
eri = pyscf.ao2mo.incore.general(scf_obj._eri, (orbs,)*4, compact=False)
h_core = scf_obj.get_hcore(fciqmcci.mol)
# t = fciqmcci.mol.intor_symmetric('cint1e_kin_sph')
# v = fciqmcci.mol.intor_symmetric('cint1e_nuc_sph')
h = reduce(numpy.dot, (orbs.T, h_core, orbs))
pyscf.tools.fcidump.from_integrals(fciqmcci.integralFile, h,
pyscf.ao2mo.restore(8,eri,nmo), nmo, nelec, fciqmcci.mol.energy_nuc(),
fciqmcci.mol.spin, orbsym, tol=tol)
# pyscf.tools.fcidump.write_eri(fout, pyscf.ao2mo.restore(8,eri,nmo),
# nmo, tol=tol)
# pyscf.tools.fcidump.write_hcore(fout, h, nmo, tol=tol)
# fout.write(' %.16g 0 0 0 0\n' % fciqmcci.mol.energy_nuc())
# The number of alpha and beta electrons.
if isinstance(nelec, (int, numpy.integer)):
neleca = nelec//2 + nelec%2
nelecb = nelec - neleca
else:
neleca, nelecb = nelec
if fciqmcci.generate_neci_input:
write_fciqmc_config_file(fciqmcci, neleca, nelecb, restart, tUHF)
if fciqmcci.verbose >= logger.DEBUG1:
in_file = fciqmcci.configFile
logger.debug1(fciqmcci, 'FCIQMC Input file')
logger.debug1(fciqmcci, open(in_file, 'r').read())
execute_fciqmc(fciqmcci)
if fciqmcci.verbose >= logger.DEBUG1:
out_file = fciqmcci.outputFileCurrent
            with open(out_file) as f:
                logger.debug1(fciqmcci, f.read())
rdm_energy = read_energy(fciqmcci)
return rdm_energy
def write_uhf_integrals_neci(fciqmcci,scf_obj,nmo,nelec,orbs,orbsym,tol=1e-15):
''' nmo is number of MO orbitals per spin channel
note that ordering is abababa... '''
eri_aaaa = pyscf.ao2mo.restore(8,pyscf.ao2mo.incore.general(scf_obj._eri, (orbs[0],orbs[0],orbs[0],orbs[0]), compact=False),nmo)
eri_bbbb = pyscf.ao2mo.restore(8,pyscf.ao2mo.incore.general(scf_obj._eri, (orbs[1],orbs[1],orbs[1],orbs[1]), compact=False),nmo)
eri_aabb = pyscf.ao2mo.restore(8,pyscf.ao2mo.incore.general(scf_obj._eri, (orbs[0],orbs[0],orbs[1],orbs[1]), compact=False),nmo)
eri_bbaa = pyscf.ao2mo.restore(8,pyscf.ao2mo.incore.general(scf_obj._eri, (orbs[1],orbs[1],orbs[0],orbs[0]), compact=False),nmo)
h_core = scf_obj.get_hcore(fciqmcci.mol)
# t = fciqmcci.mol.intor_symmetric('cint1e_kin_sph')
# v = fciqmcci.mol.intor_symmetric('cint1e_nuc_sph')
h_aa = reduce(numpy.dot, (orbs[0].T, h_core, orbs[0]))
h_bb = reduce(numpy.dot, (orbs[1].T, h_core, orbs[1]))
nuc = fciqmcci.mol.energy_nuc()
float_format = ' %.16g'
    # NECI wants its orbitals ordered a,b,a,b,a,b rather than aaaa...bbbb,
    # so reorder them accordingly.
assert(len(orbsym) % 2 == 0)
    orbsym_reorder = [i for tup in zip(orbsym[:len(orbsym)//2], orbsym[len(orbsym)//2:]) for i in tup]
a_inds = [i*2+1 for i in range(orbs[0].shape[1])]
b_inds = [i*2+2 for i in range(orbs[1].shape[1])]
with open(fciqmcci.integralFile, 'w') as fout:
if not isinstance(nelec, (int, numpy.number)):
ms = abs(nelec[0] - nelec[1])
nelec = nelec[0] + nelec[1]
else: ms=0
fout.write(' &FCI NORB=%4d,NELEC=%2d,MS2=%d,\n' % (nmo*2, nelec, ms))
if orbsym is not None and len(orbsym_reorder) > 0:
fout.write(' ORBSYM=%s\n' % ','.join([str(x) for x in orbsym_reorder]))
else:
fout.write(' ORBSYM=%s\n' % ('1,' * 2*nmo))
fout.write(' ISYM=1, UHF=TRUE\n')
fout.write(' &END\n')
# Assume 8-fold symmetry
npair = nmo*(nmo+1)//2
output_format = float_format + ' %4d %4d %4d %4d\n'
ij = 0
ijkl = 0
for i in range(nmo):
for j in range(0, i+1):
kl = 0
for k in range(0, i+1):
for l in range(0, k+1):
if ij >= kl:
if abs(eri_aaaa[ijkl]) > tol:
fout.write(output_format % (eri_aaaa[ijkl], a_inds[i], a_inds[j], a_inds[k], a_inds[l]))
if abs(eri_bbbb[ijkl]) > tol:
fout.write(output_format % (eri_bbbb[ijkl], b_inds[i], b_inds[j], b_inds[k], b_inds[l]))
if abs(eri_aabb[ijkl]) > tol:
fout.write(output_format % (eri_aabb[ijkl], a_inds[i], a_inds[j], b_inds[k], b_inds[l]))
if abs(eri_bbaa[ijkl]) > tol:
fout.write(output_format % (eri_bbaa[ijkl], b_inds[i], b_inds[j], a_inds[k], a_inds[l]))
ijkl += 1
kl += 1
ij += 1
h_aa = h_aa.reshape(nmo,nmo)
h_bb = h_bb.reshape(nmo,nmo)
output_format = float_format + ' %4d %4d 0 0\n'
for i in range(nmo):
for j in range(0, i+1):
if abs(h_aa[i,j]) > tol:
fout.write(output_format % (h_aa[i,j], a_inds[i], a_inds[j]))
if abs(h_bb[i,j]) > tol:
fout.write(output_format % (h_bb[i,j], b_inds[i], b_inds[j]))
output_format = float_format + ' 0 0 0 0\n'
fout.write(output_format % nuc)
return
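# A minimal sketch (illustrative values only) of the a,b,a,b interleaving
# NECI expects, assuming nmo = 3 spatial orbitals per spin channel:
#
# >>> nmo = 3
# >>> [i*2 + 1 for i in range(nmo)]  # alpha labels, as in a_inds above
# [1, 3, 5]
# >>> [i*2 + 2 for i in range(nmo)]  # beta labels, as in b_inds above
# [2, 4, 6]
# >>> orbsym = [1, 2, 3, 1, 2, 3]    # aaa...bbb ordering on input
# >>> half = len(orbsym) // 2
# >>> [i for tup in zip(orbsym[:half], orbsym[half:]) for i in tup]
# [1, 1, 2, 2, 3, 3]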
def write_fciqmc_config_file(fciqmcci, neleca, nelecb, restart, tUHF=False):
'''Write an input file for a NECI calculation.
Args:
fciqmcci : an instance of :class:`FCIQMCCI`
Contains all the parameters used to create the input file.
neleca : int
The number of alpha electrons.
nelecb : int
The number of beta electrons.
restart : bool
Is this a restarted NECI calculation?
tUHF : bool
Whether to write input for an unrestricted (UHF) calculation.
'''
config_file = fciqmcci.configFile
nstates = len(fciqmcci.state_weights)
f = open(config_file, 'w')
f.write('title\n')
f.write('\n')
f.write('system read noorder\n')
f.write('symignoreenergies\n')
f.write('freeformat\n')
f.write('electrons %d\n' % (neleca+nelecb))
# fci-core requires these two options.
f.write('spin-restrict %d\n' %(-fciqmcci.mol.spin))
f.write('sym %d 0 0 0\n' % (fciqmcci.pg_symmetry-1))
f.write('nonuniformrandexcits 4ind-weighted\n')
if not (tUHF or fciqmcci.mol.spin != 0):
f.write('hphf 0\n')
f.write('nobrillouintheorem\n')
if nstates > 1:
f.write('system-replicas %d\n' % (2*nstates))
if fciqmcci.system_options:
f.write(fciqmcci.system_options + '\n')
f.write('endsys\n')
f.write('\n')
f.write('calc\n')
f.write('methods\n')
f.write('method vertex fcimc\n')
f.write('endmethods\n')
f.write('time %f\n' % fciqmcci.time)
f.write('memoryfacpart 2.0\n')
f.write('memoryfacspawn 1.0\n')
f.write('totalwalkers %d\n' % fciqmcci.maxwalkers)
f.write('nmcyc %d\n' % fciqmcci.maxIter)
f.write('seed %d\n' % fciqmcci.seed)
if (restart):
f.write('readpops\n')
else:
f.write('startsinglepart\n')
f.write('diagshift %f\n' % fciqmcci.InitShift)
f.write('rdmsamplingiters %d\n' % fciqmcci.RDMSamples)
f.write('shiftdamp 0.05\n')
if (fciqmcci.tau != -1.0):
# Write the user-specified time step rather than a hard-coded value.
f.write('tau %f\n' % fciqmcci.tau)
f.write('truncinitiator\n')
f.write('addtoinitiator %d\n' % fciqmcci.AddtoInit)
f.write('allrealcoeff\n')
f.write('realspawncutoff 0.4\n')
f.write('semi-stochastic\n')
f.write('mp1-core 2000\n')
# f.write('fci-core\n')
# f.write('trial-wavefunction 5\n')
f.write('jump-shift\n')
f.write('proje-changeref 1.5\n')
f.write('stepsshift 10\n')
f.write('maxwalkerbloom 3\n')
# Dynamic load-balancing is incompatible with semi-stochastic.
# Ok if restarting from a semi-stochastic popsfile,
# (where it will do one redistribution) but not otherwise.
f.write('load-balance-blocks off\n')
if nstates > 1:
f.write('orthogonalise-replicas\n')
f.write('doubles-init\n')
f.write('multi-ref-shift\n')
# f.write('fci-init\n')
if fciqmcci.calc_options:
f.write(fciqmcci.calc_options + '\n')
f.write('endcalc\n')
f.write('\n')
f.write('integral\n')
f.write('freeze %d %d\n' % (fciqmcci.nfreezecore, fciqmcci.nfreezevirt))
f.write('endint\n')
f.write('\n')
f.write('logging\n')
f.write('popsfiletimer 60.0\n')
f.write('binarypops\n')
f.write('calcrdmonfly 3 500 500\n')
f.write('write-spin-free-rdm\n')
f.write('printonerdm\n')
if fciqmcci.logging_options:
f.write(fciqmcci.logging_options + '\n')
f.write('endlog\n')
f.write('end\n')
f.close()
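# A minimal sketch of driving write_fciqmc_config_file directly with a
# hypothetical stand-in object; every attribute below is one the writer
# reads, and all values are illustrative only:
#
# >>> from types import SimpleNamespace
# >>> stub = SimpleNamespace(
# ...     configFile='neci.inp', state_weights=[1.0],
# ...     mol=SimpleNamespace(spin=0), pg_symmetry=1,
# ...     system_options='', calc_options='', logging_options='',
# ...     time=10.0, maxwalkers=10000, maxIter=50000, seed=7,
# ...     InitShift=0.1, RDMSamples=5000, tau=-1.0, AddtoInit=3,
# ...     nfreezecore=0, nfreezevirt=0)
# >>> write_fciqmc_config_file(stub, neleca=2, nelecb=2, restart=False)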
def write_integrals_file(h1e, eri, norb, neleca, nelecb, fciqmcci, ecore=0):
'''Write an integral dump file, based on the integrals provided.
Args:
h1e : 2D ndarray
Core Hamiltonian.
eri : 2D ndarray
Two-electron integrals.
norb : int
Number of orbitals.
neleca : int
Number of alpha electrons.
nelecb : int
Number of beta electrons
fciqmcci : an instance of :class:`FCIQMCCI`
FCIQMC calculation, used to access the integral dump file name and
some symmetry properties.
'''
integralFile = os.path.join(fciqmcci.scratchDirectory,fciqmcci.integralFile)
# Ensure 4-fold symmetry.
eri = pyscf.ao2mo.restore(4, eri, norb)
if fciqmcci.mol.symmetry and len(fciqmcci.orbsym) > 0:
orbsym = [IRREP_MAP[fciqmcci.groupname][i] for i in fciqmcci.orbsym]
else:
orbsym = []
pyscf.tools.fcidump.from_integrals(integralFile, h1e, eri, norb,
neleca+nelecb, ecore, ms=abs(neleca-nelecb),
orbsym=orbsym, tol=1e-10)
def execute_fciqmc(fciqmcci):
'''Call the external FCIQMC program.
Args:
fciqmcci : an instance of :class:`FCIQMCCI`
Specifies the FCIQMC calculation.
'''
in_file = os.path.join(fciqmcci.scratchDirectory, fciqmcci.configFile)
outfiletmp = fciqmcci.outputFileRoot
files = os.listdir(os.path.join(fciqmcci.scratchDirectory, '.'))
# Search for an unused output file.
i = 1
while outfiletmp in files:
outfiletmp = fciqmcci.outputFileRoot + '_' + str(i)
i += 1
logger.info(fciqmcci, 'FCIQMC output file: %s', outfiletmp)
fciqmcci.outputFileCurrent = outfiletmp
out_file = os.path.join(fciqmcci.scratchDirectory, outfiletmp)
if fciqmcci.executable == 'external':
logger.info(fciqmcci, 'External FCIQMC calculation requested from '
'dumped integrals.')
logger.info(fciqmcci, 'Waiting for density matrices and output file '
'to be returned.')
try:
# raw_input only exists on Python 2; fall back to input() on Python 3.
raw_input("Press Enter to continue with calculation...")
except NameError:
input("Press Enter to continue with calculation...")
else:
call("%s %s > %s" % (fciqmcci.executable, in_file, out_file), shell=True)
def read_energy(fciqmcci):
'''Read and return the final RDM energy from a NECI output file.
Args:
fciqmcci : an instance of :class:`FCIQMCCI`
Specifies the FCIQMC calculation. Used to locate the FCIQMC output
file.
Returns:
rdm_energy : float
The final RDM energy printed to the output file.
'''
out_file = open(os.path.join(fciqmcci.scratchDirectory,
fciqmcci.outputFileCurrent), "r")
rdm_energy = None
for line in out_file:
# Lookup the RDM energy from the output.
if "*TOTAL ENERGY* CALCULATED USING THE" in line:
rdm_energy = float(line.split()[-1])
break
if rdm_energy is None:
out_file.close()
raise RuntimeError('RDM energy not found in the FCIQMC output file.')
logger.info(fciqmcci, 'Total energy from FCIQMC: %.15f', rdm_energy)
out_file.close()
return rdm_energy
def read_neci_one_pdm(fciqmcci, filename, norb, nelec, directory='.'):
'''Obtain the spin-free 1RDM from neci by reading in the spin free 2RDM.
If core orbitals have been indicated as frozen in neci, this core contribution
will be explicitly added back into the RDM. Therefore, the norb parameter
should be the total number of orbitals passed to neci (inc. frozen), while
nelec is the total number of electrons (inc. frozen), but not inactive if running
through CASSCF.
'''
two_pdm = read_neci_two_pdm(fciqmcci, filename, norb, directory)
one_pdm = one_from_two_pdm(two_pdm, nelec)
return one_pdm
def read_neci_1dms(fciqmcci, norb, nelec, filename='OneRDM.1', directory='.'):
'''Read the spin-resolved RDMs, as they appear in the neci output.'''
f = open(os.path.join(directory, filename),'r')
dm1a = numpy.zeros((norb,norb))
dm1b = numpy.zeros((norb,norb))
for line in f.readlines():
linesp = line.split()
i, j = int(linesp[0]), int(linesp[1])
assert((i % 2) == (j % 2))
if i % 2 == 1:
# alpha (odd 1-indexed spin orbitals)
assert(all(x < norb for x in (i//2, j//2)))
dm1a[i//2, j//2] = float(linesp[2])
dm1a[j//2, i//2] = float(linesp[2])
else:
# beta (even 1-indexed spin orbitals)
assert(all(x < norb for x in (i//2 - 1, j//2 - 1)))
dm1b[i//2 - 1, j//2 - 1] = float(linesp[2])
dm1b[j//2 - 1, i//2 - 1] = float(linesp[2])
f.close()
assert(numpy.allclose(dm1a.trace()+dm1b.trace(),sum(nelec)))
return dm1a, dm1b
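# A small sketch of the index convention handled above: NECI's OneRDM lines
# carry 1-indexed spin orbitals in a,b,a,b order, so (hypothetical labels)
#
# >>> [(i, i//2) for i in (1, 3, 5)]      # odd labels = alpha -> spatial index
# [(1, 0), (3, 1), (5, 2)]
# >>> [(i, i//2 - 1) for i in (2, 4, 6)]  # even labels = beta -> spatial index
# [(2, 0), (4, 1), (6, 2)]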
def read_neci_2dms(fciqmcci, norb, nelec, filename_aa='TwoRDM_aaaa.1',
filename_abba='TwoRDM_abba.1', filename_abab='TwoRDM_abab.1', directory='.', reorder=True,
dm1a=None,dm1b=None):
''' Find the spin-resolved RDMs (assuming a/b symmetry). Return them in the
pyscf form you would get from e.g. the direct_spin1.make_rdm12s routine,
with reorder=True. This means (assuming reorder = True):
dm2ab[i,j,k,l] = < i_a* k_b* l_b j_a >
dm2aa[i,j,k,l] = < i_a* k_a* l_a j_a >
to get the dm2abba matrix (see spin_op.make_rdm2_abba) from this (assuming rhf), then you need
dm2abba = -dm2ab.transpose(2,1,0,3)
if reorder = False:
dm2aa[:,k,k,:] += dm1a
dm2bb[:,k,k,:] += dm1b
dm2ab unchanged
Note that the spin-free RDMs are just dm2aa + dm2bb + 2*dm2ab if reorder = True
'''
f = open(os.path.join(directory, filename_aa),'r')
dm2aa = numpy.zeros((norb,norb,norb,norb))
for line in f.readlines():
linesp = line.split()
i,j,k,l = (int(linesp[0])-1, int(linesp[1])-1, int(linesp[3])-1, int(linesp[2])-1)
val = float(linesp[4])
assert(all(x<norb for x in (i,j,k,l)))
# Stored as 1* 2* 4 3
dm2aa[i,j,k,l] = val
# Other permutations
dm2aa[j,i,k,l] = -val
dm2aa[i,j,l,k] = -val
dm2aa[j,i,l,k] = val
# Hermitian conjugate symmetry, assuming real orbitals
dm2aa[l,k,j,i] = val
# Other permutations
dm2aa[l,k,i,j] = -val
dm2aa[k,l,j,i] = -val
dm2aa[k,l,i,j] = val
f.close()
dm2bb = dm2aa.copy() #spin symmetry
# dm2ab initially (before reordering) stores [a,b,b,a]
dm2ab = numpy.zeros((norb,norb,norb,norb))
f_abba = open(os.path.join(directory, filename_abba),'r')
f_abab = open(os.path.join(directory, filename_abab),'r')
for line in f_abba.readlines():
linesp = line.split()
i,j,k,l = (int(linesp[0])-1, int(linesp[1])-1, int(linesp[2])-1, int(linesp[3])-1)
val = float(linesp[4])
assert(all(x<norb for x in (i,j,k,l)))
assert(numpy.allclose(dm2ab[i,j,k,l],-val) or dm2ab[i,j,k,l] == 0.0)
dm2ab[i,j,k,l] = -val
# Hermitian conjugate
assert(numpy.allclose(dm2ab[l,k,j,i],-val) or dm2ab[l,k,j,i] == 0.0)
dm2ab[l,k,j,i] = -val
# Time reversal sym
# print(i,j,k,l,val,dm2ab[j,i,l,k],
# numpy.allclose(dm2ab[j,i,l,k],-val), dm2ab[j,i,l,k] == 0.0)
assert(numpy.allclose(dm2ab[j,i,l,k],-val) or dm2ab[j,i,l,k] == 0.0)
dm2ab[j,i,l,k] = -val
assert(numpy.allclose(dm2ab[k,l,i,j],-val) or dm2ab[k,l,i,j] == 0.0)
dm2ab[k,l,i,j] = -val
for line in f_abab.readlines():
linesp = line.split()
i,j,k,l = (int(linesp[0])-1, int(linesp[1])-1, int(linesp[2])-1, int(linesp[3])-1)
val = float(linesp[4])
assert(all(x<norb for x in (i,j,k,l)))
assert(numpy.allclose(dm2ab[i,j,l,k],val) or dm2ab[i,j,l,k] == 0.0)
dm2ab[i,j,l,k] = val
# Hermitian conjugate
assert(numpy.allclose(dm2ab[k,l,j,i],val) or dm2ab[k,l,j,i] == 0.0)
dm2ab[k,l,j,i] = val
# Time reversal symmetry
assert(numpy.allclose(dm2ab[j,i,k,l],val) or dm2ab[j,i,k,l] == 0.0)
dm2ab[j,i,k,l] = val
assert(numpy.allclose(dm2ab[l,k,i,j],val) or dm2ab[l,k,i,j] == 0.0)
dm2ab[l,k,i,j] = val
f_abab.close()
f_abba.close()
# i.e. I want the last index to go second
dm2aa = dm2aa.transpose(0,3,1,2)
dm2bb = dm2bb.transpose(0,3,1,2)
dm2ab = dm2ab.transpose(0,3,1,2)
if not reorder:
# We need to undo the reordering routine in rdm.py
if dm1a is None:
pdmfile = filename_aa.split('.')
pdmfile = 'OneRDM.'+pdmfile[1]
dm1a, dm1b = read_neci_1dms(fciqmcci, norb, nelec, filename=pdmfile, directory=directory)
for k in range(norb):
dm2aa[:,k,k,:] += dm1a
dm2bb[:,k,k,:] += dm1b
return dm2aa, dm2ab, dm2bb
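# As the docstring above notes, with reorder=True the spin-free 2-RDM can be
# recovered by summing the spin blocks; a minimal usage sketch (assuming the
# default NECI file names are present in the working directory):
#
# >>> dm2aa, dm2ab, dm2bb = read_neci_2dms(fciqmcci, norb, nelec)
# >>> dm2_spinfree = dm2aa + dm2bb + 2*dm2ab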
def add_spinned_core_rdms(mf, ncore, dm1a_act, dm1b_act, dm2aa_act, dm2ab_act, dm2bb_act, reorder=True):
''' Add an RHF core, in the MO basis, to the 1- and 2-RDMs.'''
norb = ncore + dm1a_act.shape[0]
dm1a = numpy.zeros((norb,norb))
dm1b = numpy.zeros((norb,norb))
dm2aa = numpy.zeros((norb,norb,norb,norb))
dm2ab = numpy.zeros((norb,norb,norb,norb))
dm2bb = numpy.zeros((norb,norb,norb,norb))
if not reorder:
# Assume that the ordering of the active rdms is 'False'.
# Switch before including (back to 'true')
dm1a_act_, dm2aa_act_ = pyscf.fci.rdm.reorder_rdm(dm1a_act, dm2aa_act, inplace=False)
dm1b_act_, dm2bb_act_ = pyscf.fci.rdm.reorder_rdm(dm1b_act, dm2bb_act, inplace=False)
else:
dm1a_act_ = dm1a_act
dm1b_act_ = dm1b_act
dm2aa_act_ = dm2aa_act
dm2bb_act_ = dm2bb_act
# Always add the core to the 'reorder=True' ordering of the rdms
dm1a[ncore:,ncore:] = dm1a_act_
dm1b[ncore:,ncore:] = dm1b_act_
for i in range(ncore):
dm1a[i,i] = 1.0
dm1b[i,i] = 1.0
dm2aa[ncore:,ncore:,ncore:,ncore:] = dm2aa_act_
dm2bb[ncore:,ncore:,ncore:,ncore:] = dm2bb_act_
dm2ab[ncore:,ncore:,ncore:,ncore:] = dm2ab_act
for i in range(ncore):
for j in range(ncore):
dm2aa[i,i,j,j] += 1.0
dm2aa[j,i,i,j] += -1.0
dm2bb[i,i,j,j] += 1.0
dm2bb[j,i,i,j] += -1.0
dm2ab[i,i,j,j] += 1.0
for p in range(ncore,norb):
for q in range(ncore,norb):
dm2aa[p,q,i,i] += dm1a[p,q]
dm2aa[i,i,p,q] += dm1a[p,q]
dm2aa[i,q,p,i] += -dm1a[p,q]
dm2aa[p,i,i,q] += -dm1a[p,q]
dm2bb[p,q,i,i] += dm1b[p,q]
dm2bb[i,i,p,q] += dm1b[p,q]
dm2bb[i,q,p,i] += -dm1b[p,q]
dm2bb[p,i,i,q] += -dm1b[p,q]
dm2ab[p,q,i,i] += dm1a[p,q]
dm2ab[i,i,p,q] += dm1b[p,q]
if not reorder:
# Change back to the 'non-reordered' ordering!
for k in range(norb):
dm2aa[:,k,k,:] += dm1a
dm2bb[:,k,k,:] += dm1b
return dm1a, dm1b, dm2aa, dm2ab, dm2bb
def read_neci_two_pdm(fciqmcci, filename, norb, directory='.'):
'''Read a spin-free 2-rdm output from a NECI calculation, and return it in
a form supported by pyscf. Note that the RDMs in neci are written
as RDM_ijkl = < a^+_is a^+_jt a_lt a_ks >. In pyscf, the convention _after
reordering_ is 2RDM_ijkl = < a^+_is a^+_kt a_lt a_js >, where s and t are spin
indices to be summed over. Therefore, the middle two indices need to be swapped.
If core orbitals have been indicated as frozen in neci, this core contribution
will be explicitly added back into the RDM. Therefore, the norb parameter
should be the total number of orbitals passed to neci (including frozen), but
not inactive orbitals if running through CASSCF.
Args:
filename : str
Name of the file to read the 2-rdm from.
norb : int
The number of orbitals inc. frozen in neci, and therefore the
number of values each 2-rdm index can take.
directory : str
The directory in which to search for the 2-rdm file.
Returns:
two_pdm : ndarray
The read-in 2-rdm.
'''
f = open(os.path.join(directory, filename), 'r')
nfrzorb = fciqmcci.nfreezecore//2
norb_active = norb - nfrzorb
two_pdm_active = numpy.zeros( (norb_active, norb_active, norb_active, norb_active) )
for line in f.readlines():
linesp = line.split()
if(int(linesp[0]) != -1):
# Arrays from neci are '1' indexed
# We reorder from D[i,j,k,l] = < i^+ j^+ l k >
# to D[i,j,k,l] = < i^+ k^+ l j > to match pyscf
# Therefore, all we need to do is to swap the middle two indices.
ind1 = int(linesp[0]) - 1
ind2 = int(linesp[2]) - 1
ind3 = int(linesp[1]) - 1
ind4 = int(linesp[3]) - 1
assert(all(0 <= ind < norb_active for ind in (ind1, ind2, ind3, ind4)))
two_pdm_active[ind1, ind2, ind3, ind4] = float(linesp[4])
f.close()
# In order to add any frozen core, we first need to find the spin-free
# 1-RDM in the active space.
one_pdm_active = one_from_two_pdm(two_pdm_active,fciqmcci.mol.nelectron-fciqmcci.nfreezecore)
# Copy the 2RDM part of the active space.
two_pdm = numpy.zeros( (norb, norb, norb, norb) )
actstart = nfrzorb
actend = norb - fciqmcci.nfreezevirt//2
two_pdm[actstart:actend, actstart:actend, actstart:actend, actstart:actend] = two_pdm_active
# Interaction between frozen and active space.
for p in range(nfrzorb):
# p loops over frozen spatial orbitals.
for r in range(actstart,actend):
for s in range(actstart,actend):
two_pdm[p,p,r,s] += 2.0*one_pdm_active[r-nfrzorb,s-nfrzorb]
two_pdm[r,s,p,p] += 2.0*one_pdm_active[r-nfrzorb,s-nfrzorb]
two_pdm[p,r,s,p] -= one_pdm_active[r-nfrzorb,s-nfrzorb]
two_pdm[r,p,p,s] -= one_pdm_active[r-nfrzorb,s-nfrzorb]
# Add on frozen core contribution, assuming that the core orbitals are
# doubly occupied.
for i in range(nfrzorb):
for j in range(nfrzorb):
two_pdm[i,i,j,j] += 4.0
two_pdm[i,j,j,i] += -2.0
return two_pdm
def one_from_two_pdm(two_pdm, nelec):
'''Return a 1-rdm, given a 2-rdm to contract.
Args:
two_pdm : ndarray
A (spin-free) 2-particle reduced density matrix.
nelec: int
The number of electrons contributing to the RDMs.
Returns:
one_pdm : ndarray
The (spin-free) 1-particle reduced density matrix.
'''
# Last two indices refer to middle two second quantized operators in the 2RDM
one_pdm = numpy.einsum('ikjj->ik', two_pdm)
one_pdm /= (numpy.sum(nelec)-1)
return one_pdm
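# A quick sanity check of the contraction, assuming the smallest closed-shell
# case: one doubly occupied spatial orbital with nelec = 2. The only spin-free
# 2-RDM element is < a^+_s a^+_t a_t a_s > summed over spins = N*(N-1) = 2,
# and contracting it back must give a 1-RDM whose trace equals N:
#
# >>> toy = numpy.zeros((1, 1, 1, 1))
# >>> toy[0, 0, 0, 0] = 2.0
# >>> one_from_two_pdm(toy, 2)
# array([[2.]])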
def find_full_casscf_12rdm(fciqmcci, mo_coeff, filename, norbcas, neleccas, directory='.'):
'''Return the full 1- and 2-RDMs after a CASSCF calculation, by adding
on the contributions from the inactive spaces. Requires the cas space to
be given, as well as a set of mo coefficients in the complete space.
'''
two_pdm = read_neci_two_pdm(fciqmcci, filename, norbcas, directory)
one_pdm = one_from_two_pdm(two_pdm, neleccas)
return add_inactive_space_to_rdm(fciqmcci.mol, mo_coeff, one_pdm, two_pdm)
def add_inactive_space_to_rdm(mol, mo_coeff, one_pdm, two_pdm):
'''If a CASSCF calculation has been done, the final RDMs from neci will
not contain the doubly occupied inactive orbitals. This function will add
them and return the full density matrices.
'''
# Find the number of inactive spatial orbitals: take the trace of the 1RDM
# as the number of active electrons, subtract it from the total electron
# count, and halve, since inactive orbitals are doubly occupied.
ninact = (mol.nelectron - int(round(numpy.trace(one_pdm)))) // 2
norb = mo_coeff.shape[1]
nsizerdm = one_pdm.shape[0]
one_pdm_ = numpy.zeros( (norb, norb) )
# Add the core first.
for i in range(ninact):
one_pdm_[i,i] = 2.0
# Add the rest of the density matrix.
one_pdm_[ninact:ninact+nsizerdm,ninact:ninact+nsizerdm] = one_pdm[:,:]
two_pdm_ = numpy.zeros( (norb, norb, norb, norb) )
# Add on frozen core contribution, assuming that the inactive orbitals are
# doubly occupied.
for i in range(ninact):
for j in range(ninact):
two_pdm_[i,i,j,j] += 4.0
two_pdm_[i,j,j,i] += -2.0
# Inactive-Active elements.
for p in range(ninact):
for r in range(ninact,ninact+nsizerdm):
for s in range(ninact,ninact+nsizerdm):
two_pdm_[p,p,r,s] += 2.0*one_pdm_[r,s]
two_pdm_[r,s,p,p] += 2.0*one_pdm_[r,s]
two_pdm_[p,r,s,p] -= one_pdm_[r,s]
two_pdm_[r,p,p,s] -= one_pdm_[r,s]
# Add active space.
two_pdm_[ninact:ninact+nsizerdm,ninact:ninact+nsizerdm, \
ninact:ninact+nsizerdm,ninact:ninact+nsizerdm] = \
two_pdm[:,:]
return one_pdm_, two_pdm_
def calc_dipole(mol, mo_coeff, one_pdm):
'''Calculate and return the dipole moment for a given molecule, set of
molecular orbital coefficients and a 1-rdm.
Args:
mol : an instance of :class:`Mole`
Specifies the molecule.
mo_coeff : ndarray
Orbital coefficients. Each column is one orbital.
one_pdm : ndarray
1-rdm.
Returns:
tot_dipmom : list of float
The total dipole moment of the system in each dimension.
elec_dipmom : list of float
The electronic component of the dipole moment in each dimension.
nuc_dipmom : list of float
The nuclear component of the dipole moment in each dimension.
'''
assert(one_pdm.shape[0] == one_pdm.shape[1])
norb = mo_coeff.shape[1]
nsizerdm = one_pdm.shape[0]
if nsizerdm != norb:
raise RuntimeError('''Size of the 1RDM does not match the number of
orbitals. Have you correctly included the external space if
running from CASSCF?''')
# Call the integral generator for r integrals in the AO basis. There
# are 3 dimensions for x, y and z components.
aodmints = mol.intor('cint1e_r_sph', comp=3)
# modmints will hold the MO transformed integrals.
modmints = numpy.empty_like(aodmints)
# For each component, transform integrals into the MO basis.
for i in range(aodmints.shape[0]):
modmints[i] = reduce(numpy.dot, (mo_coeff.T, aodmints[i], mo_coeff))
# Contract with MO r integrals for electronic contribution.
elec_dipmom = []
for i in range(modmints.shape[0]):
elec_dipmom.append( -numpy.trace( numpy.dot( one_pdm, modmints[i])) )
# Nuclear contribution.
nuc_dipmom = [0.0, 0.0, 0.0]
for i in range(mol.natm):
for j in range(aodmints.shape[0]):
nuc_dipmom[j] += mol.atom_charge(i)*mol.atom_coord(i)[j]
tot_dipmom = [a+b for (a,b) in zip(elec_dipmom, nuc_dipmom)]
return tot_dipmom, elec_dipmom, nuc_dipmom
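# A minimal usage sketch, assuming a converged RHF object `m` and molecule
# `mol` (names illustrative): in the MO basis the RHF 1-RDM is simply 2 on
# the occupied diagonal, which is enough to exercise the routine.
#
# >>> norb = m.mo_coeff.shape[1]
# >>> nocc = mol.nelectron // 2
# >>> dm1 = numpy.zeros((norb, norb))
# >>> dm1[numpy.diag_indices(nocc)] = 2.0
# >>> tot, elec, nuc = calc_dipole(mol, m.mo_coeff, dm1)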
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf.tools import molden
b = 1.4
mol = gto.Mole()
mol.build(
verbose = 5,
output = None, #'out-fciqmc',
atom = [['H', (0.,0.,i)] for i in range(8)],
basis = {'H': 'sto-3g'},
symmetry = True,
# fciqmc cannot handle Dooh currently, so reduce the point group if
# full group is infinite.
symmetry_subgroup = 'D2h',
)
m = scf.RHF(mol)
m.scf()
mc = mcscf.CASSCF(m, 4, 4)
mc.fcisolver = FCIQMCCI(mol)
mc.fcisolver.tau = 0.01
mc.fcisolver.RDMSamples = 1000
mc.max_cycle_macro = 10
# Return natural orbitals from mc2step in casscf_mo.
mc.natorb = True
emc_1, e_ci, fcivec, casscf_mo, mo_energy = mc.mc2step(m.mo_coeff)
# Write orbitals to molden output.
with open( 'output.molden', 'w' ) as fout:
molden.header(mol, fout)
molden.orbital_coeff(mol, fout, casscf_mo)
# Now, calculate the full RDMs for the full energy.
one_pdm, two_pdm = find_full_casscf_12rdm(mc.fcisolver, casscf_mo,
'spinfree_TwoRDM.1', 4, 4)
e = calc_energy_from_rdms(mol, casscf_mo, one_pdm, two_pdm)
print('Energy from rdms and CASSCF should be the same: ',e,emc_1)
mc = mcscf.CASCI(m, 4, 4)
mc.fcisolver = FCIQMCCI(mol)
mc.fcisolver.tau = 0.01
mc.fcisolver.RDMSamples = 1000
emc_0 = mc.casci()[0]
b = 1.4
mol = gto.Mole()
mol.build(
verbose = 5,
output = None,
atom = [['H', (0.,0.,i)] for i in range(8)],
basis = {'H': 'sto-3g'},
symmetry = True,
symmetry_subgroup = 'D2h',
)
m = scf.RHF(mol)
m.scf()
mc = mcscf.CASSCF(m, 4, 4)
emc_1ref = mc.mc2step()[0]
mc = mcscf.CASCI(m, 4, 4)
emc_0ref = mc.casci()[0]
print('FCIQMCCI = %.15g CASCI = %.15g' % (emc_0, emc_0ref))
print('FCIQMCSCF = %.15g CASSCF = %.15g' % (emc_1, emc_1ref))
|
gkc1000/pyscf
|
pyscf/fciqmcscf/fciqmc.py
|
Python
|
apache-2.0
| 39,629
|
[
"PySCF"
] |
7506175a321c98a863533baa16f5f569faf20888b92f70344fd8a08591986b10
|
from tapiriik.settings import WEB_ROOT, HTTP_SOURCE_ADDR, GARMIN_CONNECT_USER_WATCH_ACCOUNTS
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.service_record import ServiceRecord
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, Location, Lap
from tapiriik.services.api import APIException, APIWarning, APIExcludeActivity, UserException, UserExceptionType
from tapiriik.services.statistic_calculator import ActivityStatisticCalculator
from tapiriik.services.tcx import TCXIO
from tapiriik.services.gpx import GPXIO
from tapiriik.services.fit import FITIO
from tapiriik.services.sessioncache import SessionCache
from tapiriik.services.devices import DeviceIdentifier, DeviceIdentifierType, Device
from tapiriik.database import cachedb, db
from django.core.urlresolvers import reverse
import pytz
from datetime import datetime, timedelta
import requests
import os
import math
import logging
import time
import json
import re
import random
import tempfile
from urllib.parse import urlencode
logger = logging.getLogger(__name__)
class GarminConnectService(ServiceBase):
ID = "garminconnect"
DisplayName = "Garmin Connect"
DisplayAbbreviation = "GC"
AuthenticationType = ServiceAuthenticationType.UsernamePassword
RequiresExtendedAuthorizationDetails = True
PartialSyncRequiresTrigger = len(GARMIN_CONNECT_USER_WATCH_ACCOUNTS) > 0
PartialSyncTriggerPollInterval = timedelta(minutes=20)
PartialSyncTriggerPollMultiple = len(GARMIN_CONNECT_USER_WATCH_ACCOUNTS.keys())
# +1 from default due to my embarrassing inability to...
# a) create a reasonable schema to allow for these updates.
# b) write a query to reset the counters in the existing schema.
DownloadRetryCount = 6
ConfigurationDefaults = {
"WatchUserKey": None,
"WatchUserLastID": 0
}
_activityMappings = {
"running": ActivityType.Running,
"cycling": ActivityType.Cycling,
"mountain_biking": ActivityType.MountainBiking,
"walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"resort_skiing_snowboarding": ActivityType.DownhillSkiing,
"cross_country_skiing": ActivityType.CrossCountrySkiing,
"skate_skiing": ActivityType.CrossCountrySkiing, # Well, it ain't downhill?
"backcountry_skiing_snowboarding": ActivityType.CrossCountrySkiing, # ish
"skating": ActivityType.Skating,
"swimming": ActivityType.Swimming,
"rowing": ActivityType.Rowing,
"elliptical": ActivityType.Elliptical,
"fitness_equipment": ActivityType.Gym,
"rock_climbing": ActivityType.Climbing,
"mountaineering": ActivityType.Climbing,
"strength_training": ActivityType.StrengthTraining,
"stand_up_paddleboarding": ActivityType.StandUpPaddling,
"all": ActivityType.Other, # everything will eventually resolve to this
"multi_sport": ActivityType.Other # Most useless type? You decide!
}
_reverseActivityMappings = { # Removes ambiguities when mapping back to their activity types
"running": ActivityType.Running,
"cycling": ActivityType.Cycling,
"mountain_biking": ActivityType.MountainBiking,
"walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"resort_skiing_snowboarding": ActivityType.DownhillSkiing,
"cross_country_skiing": ActivityType.CrossCountrySkiing,
"skating": ActivityType.Skating,
"swimming": ActivityType.Swimming,
"rowing": ActivityType.Rowing,
"elliptical": ActivityType.Elliptical,
"fitness_equipment": ActivityType.Gym,
"rock_climbing": ActivityType.Climbing,
"strength_training": ActivityType.StrengthTraining,
"stand_up_paddleboarding": ActivityType.StandUpPaddling,
"other": ActivityType.Other # I guess? (vs. "all" that is)
}
SupportedActivities = list(_activityMappings.values())
SupportsHR = SupportsCadence = True
SupportsActivityDeletion = True
_sessionCache = SessionCache("garminconnect", lifetime=timedelta(minutes=120), freshen_on_get=True)
_reauthAttempts = 1 # per request
_unitMap = {
"mph": ActivityStatisticUnit.MilesPerHour,
"kph": ActivityStatisticUnit.KilometersPerHour,
"hmph": ActivityStatisticUnit.HectometersPerHour,
"hydph": ActivityStatisticUnit.HundredYardsPerHour,
"celcius": ActivityStatisticUnit.DegreesCelcius,
"fahrenheit": ActivityStatisticUnit.DegreesFahrenheit,
"mile": ActivityStatisticUnit.Miles,
"kilometer": ActivityStatisticUnit.Kilometers,
"foot": ActivityStatisticUnit.Feet,
"meter": ActivityStatisticUnit.Meters,
"yard": ActivityStatisticUnit.Yards,
"kilocalorie": ActivityStatisticUnit.Kilocalories,
"bpm": ActivityStatisticUnit.BeatsPerMinute,
"stepsPerMinute": ActivityStatisticUnit.DoubledStepsPerMinute,
"rpm": ActivityStatisticUnit.RevolutionsPerMinute,
"watt": ActivityStatisticUnit.Watts,
"second": ActivityStatisticUnit.Seconds,
"ms": ActivityStatisticUnit.Milliseconds,
"mps": ActivityStatisticUnit.MetersPerSecond
}
_obligatory_headers = {
"Referer": "https://sync.tapiriik.com"
}
_garmin_signin_headers = {
"origin": "https://sso.garmin.com"
}
def __init__(self):
cachedHierarchy = cachedb.gc_type_hierarchy.find_one()
if not cachedHierarchy:
rawHierarchy = requests.get("https://connect.garmin.com/modern/proxy/activity-service/activity/activityTypes", headers=self._obligatory_headers).text
self._activityHierarchy = json.loads(rawHierarchy)
cachedb.gc_type_hierarchy.insert({"Hierarchy": rawHierarchy})
else:
self._activityHierarchy = json.loads(cachedHierarchy["Hierarchy"])
# hashmaps for determining parent type key
self._typeKeyParentMap = {}
self._typeIdKeyMap = {}
for x in self._activityHierarchy:
self._typeKeyParentMap[x["typeKey"]] = x["parentTypeId"]
self._typeIdKeyMap[x["typeId"]] = x["typeKey"]
rate_lock_path = tempfile.gettempdir() + "/gc_rate.%s.lock" % HTTP_SOURCE_ADDR
# Ensure the rate lock file exists (...the easy way)
open(rate_lock_path, "a").close()
self._rate_lock = open(rate_lock_path, "r+")
def _rate_limit(self):
import fcntl, struct, time
min_period = 1 # I appear to have been banned from Garmin Connect while determining this.
fcntl.flock(self._rate_lock,fcntl.LOCK_EX)
try:
self._rate_lock.seek(0)
last_req_start = self._rate_lock.read()
if not last_req_start:
last_req_start = 0
else:
last_req_start = float(last_req_start)
wait_time = max(0, min_period - (time.time() - last_req_start))
time.sleep(wait_time)
self._rate_lock.seek(0)
self._rate_lock.write(str(time.time()))
self._rate_lock.flush()
finally:
fcntl.flock(self._rate_lock,fcntl.LOCK_UN)
def _request_with_reauth(self, req_lambda, serviceRecord=None, email=None, password=None, force_skip_cache=False):
for i in range(self._reauthAttempts + 1):
session = self._get_session(record=serviceRecord, email=email, password=password, skip_cache=(force_skip_cache or i > 0))
self._rate_limit()
result = req_lambda(session)
if result.status_code not in (403, 500):
return result
# Pass the failed response back anyway - another handler will catch it and provide a nicer error
return result
def _get_session(self, record=None, email=None, password=None, skip_cache=False):
from tapiriik.auth.credential_storage import CredentialStore
cached = self._sessionCache.Get(record.ExternalID if record else email)
if cached and not skip_cache:
logger.debug("Using cached credential")
return cached
if record:
# longing for C style overloads...
password = CredentialStore.Decrypt(record.ExtendedAuthorization["Password"])
email = CredentialStore.Decrypt(record.ExtendedAuthorization["Email"])
session = requests.Session()
# JSIG CAS, cool I guess.
# Not quite OAuth though, so I'll continue to collect raw credentials.
# Commented stuff left in case this ever breaks because of missing parameters...
data = {
"username": email,
"password": password,
"_eventId": "submit",
"embed": "true",
# "displayNameRequired": "false"
}
params = {
"service": "https://connect.garmin.com/modern",
# "redirectAfterAccountLoginUrl": "http://connect.garmin.com/modern",
# "redirectAfterAccountCreationUrl": "http://connect.garmin.com/modern",
# "webhost": "olaxpw-connect00.garmin.com",
"clientId": "GarminConnect",
"gauthHost": "https://sso.garmin.com/sso",
# "rememberMeShown": "true",
# "rememberMeChecked": "false",
"consumeServiceTicket": "false",
# "id": "gauth-widget",
# "embedWidget": "false",
# "cssUrl": "https://static.garmincdn.com/com.garmin.connect/ui/src-css/gauth-custom.css",
# "source": "http://connect.garmin.com/en-US/signin",
# "createAccountShown": "true",
# "openCreateAccount": "false",
# "usernameShown": "true",
# "displayNameShown": "false",
# "initialFocus": "true",
# "locale": "en"
}
# I may never understand what motivates people to mangle a perfectly good protocol like HTTP in the ways they do...
preResp = session.get("https://sso.garmin.com/sso/signin", params=params)
if preResp.status_code != 200:
raise APIException("SSO prestart error %s %s" % (preResp.status_code, preResp.text))
ssoResp = session.post("https://sso.garmin.com/sso/signin", headers=self._garmin_signin_headers, params=params, data=data, allow_redirects=False)
if ssoResp.status_code != 200 or "temporarily unavailable" in ssoResp.text:
raise APIException("SSO error %s %s" % (ssoResp.status_code, ssoResp.text))
if ">sendEvent('FAIL')" in ssoResp.text:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
if ">sendEvent('ACCOUNT_LOCKED')" in ssoResp.text:
raise APIException("Account Locked", block=True, user_exception=UserException(UserExceptionType.Locked, intervention_required=True))
if "renewPassword" in ssoResp.text:
raise APIException("Reset password", block=True, user_exception=UserException(UserExceptionType.RenewPassword, intervention_required=True))
# ...AND WE'RE NOT DONE YET!
self._rate_limit()
gcRedeemResp = session.get("https://connect.garmin.com/modern", allow_redirects=False)
if gcRedeemResp.status_code != 302:
raise APIException("GC redeem-start error %s %s" % (gcRedeemResp.status_code, gcRedeemResp.text))
url_prefix = "https://connect.garmin.com"
# There are 6 redirects that need to be followed to get the correct cookie
# ... :(
max_redirect_count = 7
current_redirect_count = 1
while True:
self._rate_limit()
url = gcRedeemResp.headers["location"]
# Fix up relative redirects.
if url.startswith("/"):
url = url_prefix + url
url_prefix = "/".join(url.split("/")[:3])
gcRedeemResp = session.get(url, allow_redirects=False)
if current_redirect_count >= max_redirect_count and gcRedeemResp.status_code != 200:
raise APIException("GC redeem %d/%d error %s %s" % (current_redirect_count, max_redirect_count, gcRedeemResp.status_code, gcRedeemResp.text))
if gcRedeemResp.status_code == 200 or gcRedeemResp.status_code == 404:
break
current_redirect_count += 1
if current_redirect_count > max_redirect_count:
break
self._sessionCache.Set(record.ExternalID if record else email, session)
session.headers.update(self._obligatory_headers)
return session
def WebInit(self):
self.UserAuthorizationURL = WEB_ROOT + reverse("auth_simple", kwargs={"service": self.ID})
def Authorize(self, email, password):
from tapiriik.auth.credential_storage import CredentialStore
session = self._get_session(email=email, password=password, skip_cache=True)
self._rate_limit()
try:
dashboard = session.get("http://connect.garmin.com/modern")
userdata_json_str = re.search(r"VIEWER_SOCIAL_PROFILE\s*=\s*JSON\.parse\((.+)\);$", dashboard.text, re.MULTILINE).group(1)
userdata = json.loads(json.loads(userdata_json_str))
username = userdata["displayName"]
except Exception as e:
raise APIException("Unable to retrieve username: %s" % e, block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
return (username, {}, {"Email": CredentialStore.Encrypt(email), "Password": CredentialStore.Encrypt(password)})
def UserUploadedActivityURL(self, uploadId):
return "https://connect.garmin.com/modern/activity/%d" % uploadId
def _resolveActivityType(self, act_type):
# Mostly there are two levels of a hierarchy, so we don't really need this as the parent is included in the listing.
# But maybe they'll change that some day?
while act_type not in self._activityMappings:
try:
act_type = self._typeIdKeyMap[self._typeKeyParentMap[act_type]]
except KeyError:
raise ValueError("Activity type not found in activity hierarchy")
return self._activityMappings[act_type]
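# A standalone sketch of the same walk with hypothetical hierarchy data
# (typeKey -> parentTypeId and typeId -> typeKey), to make the loop's
# termination condition explicit:
#
# >>> mappings = {"running": "Running"}   # leaf key -> resolved type
# >>> parent_of = {"trail_running": 1}    # typeKey -> parentTypeId
# >>> key_of = {1: "running"}             # typeId -> typeKey
# >>> act = "trail_running"
# >>> while act not in mappings:
# ...     act = key_of[parent_of[act]]
# >>> mappings[act]
# 'Running'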
def DownloadActivityList(self, serviceRecord, exhaustive=False):
#https://connect.garmin.com/modern/proxy/activitylist-service/activities/search/activities?limit=20&start=0
page = 1
pageSz = 100
activities = []
exclusions = []
force_reauth = False
while True:
logger.debug("Req with " + str({"start": (page - 1) * pageSz, "limit": pageSz}))
res = self._request_with_reauth(lambda session: session.get("https://connect.garmin.com/modern/proxy/activitylist-service/activities/search/activities", params={"start": (page - 1) * pageSz, "limit": pageSz}), serviceRecord, force_skip_cache=force_reauth)
force_reauth = False
try:
res = res.json()
except ValueError:
res_txt = res.text # So it can be captured in the log message
raise APIException("Parse failure in GC list resp: %s - %s" % (res.status_code, res_txt))
for act in res:
if "ROLE_SYSTEM" in act["userRoles"]:
# GC for some reason returns a test data set instead of a 401 for unauthorized calls
force_reauth = True
break
activity = UploadedActivity()
# stationary activities have movingDuration = None while non-gps static activities have 0.0
activity.Stationary = act["movingDuration"] is None
activity.GPS = act["hasPolyline"]
activity.Private = act["privacy"]["typeKey"] == "private"
activity_name = act["activityName"]
logger.debug("Name " + activity_name if activity_name is not None else "Untitled" + ":")
if activity_name is not None and len(activity_name.strip()) and activity_name != "Untitled": # This doesn't work for internationalized accounts, oh well.
activity.Name = activity_name
activity_description = act["description"]
if activity_description is not None and len(activity_description.strip()):
activity.Notes = activity_description
activity.StartTime = pytz.utc.localize(datetime.strptime(act["startTimeGMT"], "%Y-%m-%d %H:%M:%S"))
if act["elapsedDuration"] is not None:
activity.EndTime = activity.StartTime + timedelta(0, float(act["elapsedDuration"])/1000)
else:
activity.EndTime = activity.StartTime + timedelta(0, float(act["duration"]))
logger.debug("Activity s/t " + str(activity.StartTime) + " on page " + str(page))
if "distance" in act and act["distance"] and float(act["distance"]) != 0:
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=float(act["distance"]))
activity.Type = self._resolveActivityType(act["activityType"]["typeKey"])
activity.CalculateUID()
activity.ServiceData = {"ActivityID": int(act["activityId"])}
activities.append(activity)
if force_reauth:
# Re-run activity listing
continue
logger.debug("Finished page " + str(page))
if not exhaustive or len(res) == 0:
break
else:
page += 1
return activities, exclusions
def _downloadActivitySummary(self, serviceRecord, activity):
activityID = activity.ServiceData["ActivityID"]
summary_resp = self._request_with_reauth(lambda session: session.get("https://connect.garmin.com/modern/proxy/activity-service/activity/" + str(activityID)), serviceRecord)
try:
summary_data = summary_resp.json()
except ValueError:
raise APIException("Failure downloading activity summary %s:%s" % (summary_resp.status_code, summary_resp.text))
stat_map = {}
def mapStat(gcKey, statKey, type, units):
stat_map[gcKey] = {
"key": statKey,
"attr": type,
"units": units
}
def applyStats(gc_dict, stats_obj):
for gc_key, stat in stat_map.items():
if gc_key in gc_dict:
value = float(gc_dict[gc_key])
if math.isinf(value):
continue # GC returns the minimum speed as "-Infinity" instead of 0 some times :S
getattr(stats_obj, stat["key"]).update(ActivityStatistic(stat["units"], **({stat["attr"]: value})))
mapStat("movingDuration", "MovingTime", "value", ActivityStatisticUnit.Seconds)
mapStat("duration", "TimerTime", "value", ActivityStatisticUnit.Seconds)
mapStat("distance", "Distance", "value", ActivityStatisticUnit.Meters)
mapStat("maxSpeed", "Speed", "max", ActivityStatisticUnit.MetersPerSecond)
mapStat("averageSpeed", "Speed", "avg", ActivityStatisticUnit.MetersPerSecond)
mapStat("calories", "Energy", "value", ActivityStatisticUnit.Kilocalories)
mapStat("maxHR", "HR", "max", ActivityStatisticUnit.BeatsPerMinute)
mapStat("averageHR", "HR", "avg", ActivityStatisticUnit.BeatsPerMinute)
mapStat("minElevation", "Elevation", "min", ActivityStatisticUnit.Meters)
mapStat("maxElevation", "Elevation", "max", ActivityStatisticUnit.Meters)
mapStat("elevationGain", "Elevation", "gain", ActivityStatisticUnit.Meters)
mapStat("elevationLoss", "Elevation", "loss", ActivityStatisticUnit.Meters)
mapStat("averageBikeCadence", "Cadence", "avg", ActivityStatisticUnit.RevolutionsPerMinute)
mapStat("averageCadence", "Cadence", "avg", ActivityStatisticUnit.StepsPerMinute)
applyStats(summary_data["summaryDTO"], activity.Stats)
laps_resp = self._request_with_reauth(lambda session: session.get("https://connect.garmin.com/modern/proxy/activity-service/activity/%s/splits" % str(activityID)), serviceRecord)
try:
laps_data = laps_resp.json()
except ValueError:
raise APIException("Failure downloading activity laps summary %s:%s" % (laps_resp.status_code, laps_resp.text))
for lap_data in laps_data["lapDTOs"]:
lap = Lap()
if "startTimeGMT" in lap_data:
lap.StartTime = pytz.utc.localize(datetime.strptime(lap_data["startTimeGMT"], "%Y-%m-%dT%H:%M:%S.0"))
elapsed_duration = None
if "elapsedDuration" in lap_data:
elapsed_duration = timedelta(seconds=round(float(lap_data["elapsedDuration"])))
elif "duration" in lap_data:
elapsed_duration = timedelta(seconds=round(float(lap_data["duration"])))
if lap.StartTime and elapsed_duration:
# Always recalculate end time based on duration, if we have the start time
lap.EndTime = lap.StartTime + elapsed_duration
if not lap.StartTime and lap.EndTime and elapsed_duration:
# Sometimes calculate start time based on duration
lap.StartTime = lap.EndTime - elapsed_duration
if not lap.StartTime or not lap.EndTime:
# Garmin Connect is weird.
raise APIExcludeActivity("Activity lap has no BeginTimestamp or EndTimestamp", user_exception=UserException(UserExceptionType.Corrupt))
applyStats(lap_data, lap.Stats)
activity.Laps.append(lap)
# In Garmin Land, max can be smaller than min for this field :S
if activity.Stats.Power.Max is not None and activity.Stats.Power.Min is not None and activity.Stats.Power.Min > activity.Stats.Power.Max:
activity.Stats.Power.Min = None
def DownloadActivity(self, serviceRecord, activity):
# First, download the summary stats and lap stats
self._downloadActivitySummary(serviceRecord, activity)
if len(activity.Laps) == 1:
activity.Stats = activity.Laps[0].Stats # They must be identical to pass the verification
if activity.Stationary:
# Nothing else to download
return activity
# https://connect.garmin.com/modern/proxy/activity-service/activity/###/details
activityID = activity.ServiceData["ActivityID"]
res = self._request_with_reauth(lambda session: session.get("https://connect.garmin.com/modern/proxy/activity-service/activity/{}/details?maxSize=999999999".format(activityID)), serviceRecord)
try:
raw_data = res.json()
except ValueError:
raise APIException("Activity data parse error for %s: %s" % (res.status_code, res.text))
if "metricDescriptors" not in raw_data:
activity.Stationary = True # We were wrong, oh well
return activity
attrs_map = {}
def _map_attr(gc_key, wp_key, units, in_location=False, is_timestamp=False):
attrs_map[gc_key] = {
"key": wp_key,
"to_units": units,
"in_location": in_location, # Blegh
"is_timestamp": is_timestamp # See above
}
_map_attr("directSpeed", "Speed", ActivityStatisticUnit.MetersPerSecond)
_map_attr("sumDistance", "Distance", ActivityStatisticUnit.Meters)
_map_attr("directHeartRate", "HR", ActivityStatisticUnit.BeatsPerMinute)
_map_attr("directBikeCadence", "Cadence", ActivityStatisticUnit.RevolutionsPerMinute)
_map_attr("directDoubleCadence", "RunCadence", ActivityStatisticUnit.StepsPerMinute) # 2*x mystery solved
_map_attr("directAirTemperature", "Temp", ActivityStatisticUnit.DegreesCelcius)
_map_attr("directPower", "Power", ActivityStatisticUnit.Watts)
_map_attr("directElevation", "Altitude", ActivityStatisticUnit.Meters, in_location=True)
_map_attr("directLatitude", "Latitude", None, in_location=True)
_map_attr("directLongitude", "Longitude", None, in_location=True)
_map_attr("directTimestamp", "Timestamp", None, is_timestamp=True)
# Figure out which metrics we'll be seeing in this activity
attrs_indexed = {}
for measurement in raw_data["metricDescriptors"]:
key = measurement["key"]
if key in attrs_map:
if attrs_map[key]["to_units"]:
attrs_map[key]["from_units"] = self._unitMap[measurement["unit"]["key"]]
if attrs_map[key]["to_units"] == attrs_map[key]["from_units"]:
attrs_map[key]["to_units"] = attrs_map[key]["from_units"] = None
attrs_indexed[measurement["metricsIndex"]] = attrs_map[key]
# Process the data frames
frame_idx = 0
active_lap_idx = 0
for frame in raw_data["activityDetailMetrics"]:
wp = Waypoint()
for idx, attr in attrs_indexed.items():
value = frame["metrics"][idx]
target_obj = wp
if attr["in_location"]:
if not wp.Location:
wp.Location = Location()
target_obj = wp.Location
# Handle units
if attr["is_timestamp"]:
value = pytz.utc.localize(datetime.utcfromtimestamp(value / 1000))
elif attr["to_units"]:
value = ActivityStatistic.convertValue(value, attr["from_units"], attr["to_units"])
# Write the value (can't use __dict__ because __slots__)
setattr(target_obj, attr["key"], value)
# Fix up lat/lng being zero (which appear to represent missing coords)
if wp.Location and wp.Location.Latitude == 0 and wp.Location.Longitude == 0:
wp.Location.Latitude = None
wp.Location.Longitude = None
# Please visit a physician before complaining about this
if wp.HR == 0:
wp.HR = None
# Bump the active lap if required
while (active_lap_idx < len(activity.Laps) - 1 and # Not the last lap
activity.Laps[active_lap_idx + 1].StartTime <= wp.Timestamp):
active_lap_idx += 1
activity.Laps[active_lap_idx].Waypoints.append(wp)
frame_idx += 1
return activity
def UploadActivity(self, serviceRecord, activity):
#/proxy/upload-service-1.1/json/upload/.fit
fit_file = FITIO.Dump(activity)
files = {"data": ("tap-sync-" + str(os.getpid()) + "-" + activity.UID + ".fit", fit_file)}
res = self._request_with_reauth(
lambda session: session.post("https://connect.garmin.com/modern/proxy/upload-service/upload/.fit",
files=files,
headers={"nk": "NT"}),
serviceRecord)
try:
res = res.json()["detailedImportResult"]
except ValueError:
raise APIException("Bad response during GC upload: %s %s" % (res.status_code, res.text))
if len(res["successes"]) == 0:
if len(res["failures"]) and len(res["failures"][0]["messages"]):
if res["failures"][0]["messages"][0]["content"] == "Duplicate activity":
logger.debug("Duplicate")
return # ...cool?
if res["failures"][0]["messages"][0]["content"] == "The user is from EU location, but upload consent is not yet granted or revoked":
raise APIException("EU user with no upload consent", block=True, user_exception=UserException(UserExceptionType.GCUploadConsent, intervention_required=True))
raise APIException("Unable to upload activity %s" % res)
if len(res["successes"]) > 1:
raise APIException("Uploaded succeeded, resulting in too many activities")
actid = res["successes"][0]["internalId"]
name = activity.Name # Capture in logs
notes = activity.Notes
# Update activity metadata not included in the FIT file.
metadata_object = {}
if activity.Name and activity.Name.strip():
metadata_object["activityName"] = activity.Name
if activity.Notes and activity.Notes.strip():
metadata_object["description"] = activity.Notes
if activity.Type not in [ActivityType.Running, ActivityType.Cycling, ActivityType.Other]:
# Set the legit activity type - whatever it is, it's not supported by the FIT schema
acttype = [k for k, v in self._reverseActivityMappings.items() if v == activity.Type]
if len(acttype) == 0:
raise APIWarning("GarminConnect does not support activity type " + activity.Type)
else:
acttype = acttype[0]
metadata_object["activityTypeDTO"] = {"typeKey": acttype}
if activity.Private:
metadata_object["accessControlRuleDTO"] = {"typeKey": "private"}
if metadata_object:
metadata_object["activityId"] = actid
encoding_headers = {"Content-Type": "application/json; charset=UTF-8"} # GC really, really needs this part, otherwise it throws obscure errors like "Invalid signature for signature method HMAC-SHA1"
res = self._request_with_reauth(lambda session: session.put("https://connect.garmin.com/proxy/activity-service/activity/" + str(actid), data=json.dumps(metadata_object), headers=encoding_headers), serviceRecord)
if res.status_code != 204:
raise APIWarning("Unable to set activity metadata - %d %s" % (res.status_code, res.text))
return actid
def _user_watch_user(self, serviceRecord):
if not serviceRecord.GetConfiguration()["WatchUserKey"]:
user_key = random.choice(list(GARMIN_CONNECT_USER_WATCH_ACCOUNTS.keys()))
logger.info("Assigning %s a new watch user %s" % (serviceRecord.ExternalID, user_key))
serviceRecord.SetConfiguration({"WatchUserKey": user_key})
return GARMIN_CONNECT_USER_WATCH_ACCOUNTS[user_key]
else:
return GARMIN_CONNECT_USER_WATCH_ACCOUNTS[serviceRecord.GetConfiguration()["WatchUserKey"]]
def SubscribeToPartialSyncTrigger(self, serviceRecord):
# PUT http://connect.garmin.com/proxy/userprofile-service/connection/request/cpfair
# (the poll worker finishes the connection)
user_name = self._user_watch_user(serviceRecord)["Name"]
logger.info("Requesting connection to %s from %s" % (user_name, serviceRecord.ExternalID))
self._rate_limit()
resp = self._get_session(record=serviceRecord, skip_cache=True).put("https://connect.garmin.com/proxy/userprofile-service/connection/request/%s" % user_name)
try:
assert resp.status_code == 200
assert resp.json()["requestStatus"] == "Created"
except:
raise APIException("Connection request failed with user watch account %s: %s %s" % (user_name, resp.status_code, resp.text))
else:
serviceRecord.SetConfiguration({"WatchConnectionID": resp.json()["id"]})
serviceRecord.SetPartialSyncTriggerSubscriptionState(True)
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
# GET http://connect.garmin.com/proxy/userprofile-service/socialProfile/connections to get the ID
# {"fullName":null,"userConnections":[{"userId":5754439,"displayName":"TapiirikAPITEST","fullName":null,"location":null,"profileImageUrlMedium":null,"profileImageUrlSmall":null,"connectionRequestId":1566024,"userConnectionStatus":2,"userRoles":["ROLE_CONNECTUSER","ROLE_FITNESS_USER"],"userPro":false}]}
# PUT http://connect.garmin.com/proxy/userprofile-service/connection/end/1904201
# Unfortunately there's no way to delete a pending request - the poll worker will do this from the other end
active_watch_user = self._user_watch_user(serviceRecord)
session = self._get_session(email=active_watch_user["Username"], password=active_watch_user["Password"], skip_cache=True)
if "WatchConnectionID" in serviceRecord.GetConfiguration():
self._rate_limit()
dc_resp = session.put("https://connect.garmin.com/modern/proxy/userprofile-service/connection/end/%s" % serviceRecord.GetConfiguration()["WatchConnectionID"])
if dc_resp.status_code != 200:
raise APIException("Error disconnecting user watch accunt %s from %s: %s %s" % (active_watch_user, serviceRecord.ExternalID, dc_resp.status_code, dc_resp.text))
serviceRecord.SetConfiguration({"WatchUserKey": None, "WatchConnectionID": None})
serviceRecord.SetPartialSyncTriggerSubscriptionState(False)
else:
# I broke Garmin Connect by having too many connections per account, so I can no longer query the connection list
# All the connection request emails are sitting unopened in an email inbox, though, so I'll be backfilling the IDs from those
raise APIException("Did not store connection ID")
def ShouldForcePartialSyncTrigger(self, serviceRecord):
# The poll worker can't see private activities.
return serviceRecord.GetConfiguration()["sync_private"]
def PollPartialSyncTrigger(self, multiple_index):
# TODO: ensure the appropriate users are connected
# GET http://connect.garmin.com/modern/proxy/userprofile-service/connection/pending to get ID
# [{"userId":6244126,"displayName":"tapiriik-sync-ulukhaktok","fullName":"tapiriik sync ulukhaktok","profileImageUrlSmall":null,"connectionRequestId":1904086,"requestViewed":true,"userRoles":["ROLE_CONNECTUSER"],"userPro":false}]
# PUT http://connect.garmin.com/proxy/userprofile-service/connection/accept/1904086
# ...later...
# GET http://connect.garmin.com/proxy/activitylist-service/activities/comments/subscriptionFeed?start=1&limit=10
# First, accept any pending connections
watch_user_key = sorted(list(GARMIN_CONNECT_USER_WATCH_ACCOUNTS.keys()))[multiple_index]
watch_user = GARMIN_CONNECT_USER_WATCH_ACCOUNTS[watch_user_key]
logger.debug("Initiating session for watch user %s", watch_user["Username"])
sess_args = {
"email": watch_user["Username"],
"password": watch_user["Password"]
}
# These seem to fail with a 500 (complaining about a timeout) the first time, so keep retrying.
SERVER_ERROR_RETRIES = 10
PAGE_SIZE = 100
TOTAL_SIZE = 1000
# Then, check for users with new activities
watch_activities = []
for i in range(1, TOTAL_SIZE, PAGE_SIZE):
for x in range(SERVER_ERROR_RETRIES):
logger.debug("Fetching activity list from %d - attempt %d", i, x)
watch_activities_resp = self._request_with_reauth(
lambda session: session.get("https://connect.garmin.com/modern/proxy/activitylist-service/activities/subscriptionFeed",
params={"limit": PAGE_SIZE, "start": i}),
**sess_args)
if watch_activities_resp.status_code != 500:
break
try:
watch_activities += watch_activities_resp.json()["activityList"]
except ValueError:
raise Exception("Could not parse new activities list: %s %s" % (watch_activities_resp.status_code, watch_activities_resp.text))
active_user_pairs = [(x["ownerDisplayName"], x["activityId"]) for x in watch_activities]
active_user_pairs.sort(key=lambda x: x[1]) # Highest IDs last (so they make it into the dict, supplanting lower IDs where appropriate)
active_users = dict(active_user_pairs)
active_user_recs = [ServiceRecord(x) for x in db.connections.find({"ExternalID": {"$in": list(active_users.keys())}, "Service": "garminconnect"}, {"Config": 1, "ExternalID": 1, "Service": 1})]
if len(active_user_recs) != len(active_users.keys()):
logger.warning("Mismatch %d records found for %d active users" % (len(active_user_recs), len(active_users.keys())))
to_sync_ids = []
for active_user_rec in active_user_recs:
last_active_id = active_user_rec.GetConfiguration()["WatchUserLastID"]
this_active_id = active_users[active_user_rec.ExternalID]
if this_active_id > last_active_id:
to_sync_ids.append(active_user_rec.ExternalID)
active_user_rec.SetConfiguration({"WatchUserLastID": this_active_id, "WatchUserKey": watch_user_key})
for x in range(SERVER_ERROR_RETRIES):
self._rate_limit()
logger.debug("Fetching connection request list - attempt %d", x)
pending_connections_resp = self._request_with_reauth(
lambda session: session.get("https://connect.garmin.com/modern/proxy/userprofile-service/connection/pending"),
**sess_args)
if pending_connections_resp.status_code != 500:
break
try:
pending_connections = pending_connections_resp.json()
except ValueError:
logger.error("Could not parse pending connection requests: %s %s" % (pending_connections_resp.status_code, pending_connections_resp.text))
else:
valid_pending_connections_external_ids = [x["ExternalID"] for x in db.connections.find({"Service": "garminconnect", "ExternalID": {"$in": [x["displayName"] for x in pending_connections]}}, {"ExternalID": 1})]
logger.info("Accepting %d, denying %d connection requests for %s" % (len(valid_pending_connections_external_ids), len(pending_connections) - len(valid_pending_connections_external_ids), watch_user_key))
for pending_connect in pending_connections:
if pending_connect["displayName"] in valid_pending_connections_external_ids:
self._rate_limit()
connect_resp = self._request_with_reauth(
lambda session: session.put("https://connect.garmin.com/modern/proxy/userprofile-service/connection/accept/%s" % pending_connect["connectionRequestId"]),
**sess_args)
if connect_resp.status_code != 200:
logger.error("Error accepting request on watch account %s: %s %s" % (watch_user["Name"], connect_resp.status_code, connect_resp.text))
else:
self._rate_limit()
self._request_with_reauth(
lambda session: session.put("https://connect.garmin.com/modern/proxy/userprofile-service/connection/decline/%s" % pending_connect["connectionRequestId"]),
**sess_args)
return to_sync_ids
def RevokeAuthorization(self, serviceRecord):
# nothing to do here...
pass
def DeleteCachedData(self, serviceRecord):
# nothing cached...
pass
def DeleteActivity(self, serviceRecord, uploadId):
session = self._get_session(record=serviceRecord)
self._rate_limit()
del_res = session.delete("https://connect.garmin.com/modern/proxy/activity-service/activity/%d" % uploadId)
del_res.raise_for_status()
|
cpfair/tapiriik
|
tapiriik/services/GarminConnect/garminconnect.py
|
Python
|
apache-2.0
| 41,473
|
[
"VisIt"
] |
129e1bf96f6f8180f1b5b31a16f79683dfe91895a0e23270cb64869e73159b0e
|
"""
.. module:: CosmicX
:synopsis: Remove cosmic rays from an astronomical image.
Kludged from PyCosmic to run _lacosmicx instead, which is faster.
"""
import time
import argparse
import numpy as np
import astropy.io.fits as pf
try:
import _lacosmicx
except ImportError:
print("Please install lacosmicx from github.com/cmccully/lacosmicx.")
quit()
__version__ = "0.1"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="""
Program to detect cosmics in single exposure CCD frames. Important: The image and the readout noise are assumed to be in units of electrons.
The image also needs to be BIAS subtracted! The gain can be entered to convert the image from ADUs to electrons; when this has already been done, set gain=1.0. A bad pixel mask of cosmics and a cleaned image will be provided by the routine as output.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog='CosmicX.py')
parser.add_argument("raw", type=str, help="""File name of the CCD raw frame FITS file from which to detect and reject cosmic ray hits. This frame is expected to be BIAS subtracted. If it is not provided in units of electrons please provide the appropriate gain factor to convert from ADUs to electrons with the --gain parameter.
""")
parser.add_argument("clean", type=str, help="""File name of the cosmics cleaned frame to be stored as a FITS file.
""")
parser.add_argument("mask", type=str, help="""File name of the cosmics mask frame to be stored as a FITS file.
""")
parser.add_argument("--minexptime", type=float, default=60.0, help="""Minimum exposure time to run: below this don't run.""")
parser.add_argument("--pssl", type=float, default=0.0, help="""Previously subtracted sky level.""")
parser.add_argument("--gain", type=float, default=2.2, help="""CCD gain as float number when the bias-subtracted image was not yet converted to electrons.""")
parser.add_argument("--readnoise", type=float, default=10.0, help="""CCD read-out noise in electrons.""")
parser.add_argument("--sigclip", type=float, default=5.0, help="""Threshold value for the significance level of cosmics in units of the expected noise for the pixels.""")
parser.add_argument("--sigfrac", type=float, default=0.3, help="""Fractional detection limit for neighbouring pixels.""")
parser.add_argument("--objlim", type=float, default=5.0, help="""Minimum contrast between laplacian image and fine structure image. Use 5.0 if your image is undersampled, HST, ...""")
parser.add_argument("--psffwhm", type=float, default=2.5, help="""Full Width Half Maximum of the PSF to use to generate the kernel.""")
parser.add_argument("--fsmode", choices=['median', 'convolve'], default='median', help="""Method to build the fine structure image:
'median': Use the median filter in the standard LA Cosmic algorithm
'convolve': Convolve the image with the psf kernel to calculate the
fine structure image.""")
parser.add_argument("--psfmodel", choices=['gauss', 'gaussx', 'gaussy', 'moffat'], default='gauss', help="""Model to use to generate the psf kernel if --fsmode 'convolve' and
psfk is None. The current choices are Gaussian and Moffat profiles.
'gauss' and 'moffat' produce circular PSF kernels. The 'gaussx' and
'gaussy' produce Gaussian kernels in the x and y directions
respectively.""")
parser.add_argument("--satlevel", type=float, default=50000.0, help="""If we find agglomerations of pixels above this level, we consider it to be a saturated star and do not try to correct and pixels around it. A negative satlevel skips this feature.""")
parser.add_argument("--verbose", action="store_true", default=False, help="""Flag to print some progress information on the screen.""")
parser.add_argument("--sepmed", action="store_true", default=False, help="""Flag to use separable median (faster).""")
parser.add_argument("--niter", type=int, default=4, help="""Number of iteration to be performed by the algorithms. Usually 5-6 iterations are needed to converge to a stable solution.""")
args = parser.parse_args()
f = pf.open(args.raw)
header = f[0].header
array = np.array(f[0].data, dtype=np.float32)
f.close()
if header['EXPTIME'] >= args.minexptime:
        # NOTE: args.pssl, args.satlevel and args.niter are parsed above but are
        # not forwarded to lacosmicx here, so the library's own defaults apply.
        mask, clean = _lacosmicx.lacosmicx(array, gain=args.gain, readnoise=args.readnoise, psffwhm=args.psffwhm, sigclip=args.sigclip, sigfrac=args.sigfrac, objlim=args.objlim, fsmode=args.fsmode, psfmodel=args.psfmodel, verbose=args.verbose, sepmed=args.sepmed)
header['history'] = "LA CosmicX: cleaned cosmic rays"
header['history'] = "LA CosmicX params: sigclip=%5.2f sigfrac=%5.2f objlim=%5.2f" % (args.sigclip, args.sigfrac, args.objlim)
header['history'] = "LA CosmicX params: fsmode=%s psfmodel=%s psffwhm=%5.2f" % (args.fsmode, args.psfmodel, args.psffwhm)
header['history'] = "LA CosmicX params: sepmed=%s minexptime=%f" % (args.sepmed, args.minexptime)
header['history'] = "LA CosmicX run on %s" % time.strftime("%c")
pf.writeto(args.clean, clean, header)
        mask = mask.astype("uint8")
pf.writeto(args.mask, mask, header)
else:
header['history'] = "LA CosmicX: exptime < minexptime=%.1f" % args.minexptime
pf.writeto(args.clean, array, header)
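# Illustrative command line for this script (hypothetical file names; the
# flags are the ones defined by the argparse setup above):
#
#   python CosmicX.py raw.fits clean.fits mask.fits --gain 2.2 --readnoise 10.0 \
#       --sigclip 5.0 --sigfrac 0.3 --objlim 5.0 --sepmed --verbose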
|
scizen9/kpy
|
SEDMr/CosmicX.py
|
Python
|
gpl-2.0
| 5,410
|
[
"Gaussian"
] |
0d9d6b01368f73614e680725842bf017451676c161a4dfe5b074c2d86f159bd0
|
'''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import re
__ELEMENTAL_MASSES = {
'H': 1.007825032,
'He': 3.01602932,
'Li': 6.015122887,
'Be': 9.012183065,
'B': 10.01293695,
'C': 12,
'N': 14.003074,
'O': 15.99491462,
'F': 18.99840316,
'Ne': 19.99244018,
'Na': 22.98976928,
'Mg': 23.9850417,
'Al': 26.98153853,
'Si': 27.97692653,
'P': 30.973762,
'S': 31.97207117,
'Cl': 34.96885268,
'Ar': 35.96754511,
'K': 38.96370649,
'Ca': 39.96259086,
'Sc': 44.95590828,
'Ti': 45.95262772,
'Cr': 49.94604183,
'V': 49.94715601,
'Fe': 53.93960899,
'Mn': 54.93804391,
'Ni': 57.93534241,
'Co': 58.93319429,
'Cu': 62.92959772,
'Zn': 63.92914201,
'Ga': 68.9255735,
'Ge': 69.92424875,
'Se': 73.92247593,
'As': 74.92159457,
'Kr': 77.92036494,
'Br': 78.9183376,
'Sr': 83.9134191,
'Rb': 84.91178974,
'Y': 88.9058403,
'Zr': 89.9046977,
'Mo': 91.90680796,
'Nb': 92.906373,
'Ru': 95.90759025,
'Tc': 96.9063667,
'Pd': 101.9056022,
'Rh': 102.905498,
'Cd': 105.9064599,
'Ag': 106.9050916,
'Sn': 111.9048239,
'In': 112.9040618,
'Te': 119.9040593,
'Sb': 120.903812,
'Xe': 123.905892,
'I': 126.9044719,
'Ba': 129.9063207,
'Cs': 132.905452,
'Ce': 135.9071292,
'La': 137.9071149,
'Pr': 140.9076576,
'Nd': 141.907729,
'Sm': 143.9120065,
'Pm': 144.9127559,
'Eu': 150.9198578,
'Gd': 151.9197995,
'Dy': 155.9242847,
'Tb': 158.9253547,
'Er': 161.9287884,
'Ho': 164.9303288,
'Yb': 167.9338896,
'Tm': 168.9342179,
'Hf': 173.9400461,
'Lu': 174.9407752,
'W': 179.9467108,
'Ta': 179.9474648,
'Os': 183.9524885,
'Re': 184.9529545,
'Pt': 189.9599297,
'Ir': 190.9605893,
'Hg': 195.9658326,
'Au': 196.9665688,
'Tl': 202.9723446,
'Pb': 203.973044,
'Bi': 208.9803991,
'Po': 208.9824308,
'At': 209.9871479,
'Rn': 210.9906011,
'Ra': 223.0185023,
'Fr': 223.019736,
'Ac': 227.0277523,
'Th': 230.0331341,
'Pa': 231.0358842,
'U': 233.0396355,
'Np': 236.04657,
'Pu': 238.0495601,
'Am': 241.0568293,
'Cm': 243.0613893,
'Bk': 247.0703073,
'Cf': 249.0748539,
'Es': 252.08298,
'Fm': 257.0951061,
'Md': 258.0984315,
'No': 259.10103,
'Lr': 262.10961,
'Rf': 267.12179,
'Db': 268.12567,
'Hs': 270.13429,
'Sg': 271.13393,
'Bh': 272.13826,
'Mt': 276.15159,
'Rg': 280.16514,
'Ds': 281.16451,
'Uut': 284.17873,
'Cn': 285.17712,
'Uup': 288.19274,
'Fl': 289.19042,
'Uus': 292.20746,
'Lv': 293.20449,
'Uuo': 294.21392,
}
def get_molecular_mass(formula, r_mass=float('NaN')):
'''Calculate and return molecular mass from chemical formula.'''
# Handle R-groups with 'dummy' mass:
if 'R' in formula and r_mass:
elem_masses = __ELEMENTAL_MASSES.copy()
elem_masses['R'] = r_mass
else:
elem_masses = __ELEMENTAL_MASSES
return sum([elem_masses[element] * count
if element in elem_masses
else float('NaN')
for element, count in get_elem_comp(formula).items()])
def get_elem_comp(formula):
'''Gets elemental composition as a dict from formula.'''
elem_comp = {}
for term in re.findall('[A-Z]{1}[0-9]*[a-z]{0,1}[0-9]*', formula):
        element = re.search('[A-Za-z]*', term).group(0)
result = re.search('[0-9]+', term)
elem_comp[element] = int(result.group(0)) if result else 1
return elem_comp
def parse_equation(equation, separator='='):
'''Parses chemical equation strings.'''
equation_terms = [re.split('\\s+\\+\\s+', equation_side)
for equation_side in
re.split('\\s*' + separator + '\\s*', equation)]
# Add reactants and products:
return _get_reaction_participants(equation_terms[0], -1) + \
_get_reaction_participants(equation_terms[1], 1)
def _get_reaction_participants(equation_term, stoich_factor):
'''Adds reaction participants to a list of participants.'''
if len(equation_term) == 1 and not equation_term[0]:
return []
all_terms = [participant.split() for participant in equation_term]
return [[terms[0], stoich_factor]
if len(terms) == 1
else [terms[1], stoich_factor * float(terms[0])]
for terms in all_terms]
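# Illustrative usage of the helpers above (masses follow from the
# __ELEMENTAL_MASSES table; the formulas are hypothetical inputs):
#
#     >>> get_elem_comp('C6H12O6')
#     {'C': 6, 'H': 12, 'O': 6}
#     >>> round(get_molecular_mass('H2O'), 4)
#     18.0106
#     >>> parse_equation('C6H12O6 + 6 O2 = 6 CO2 + 6 H2O')
#     [['C6H12O6', -1], ['O2', -6.0], ['CO2', 6.0], ['H2O', 6.0]]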
|
synbiochem/synbiochem-py
|
synbiochem/utils/chem_utils.py
|
Python
|
mit
| 4,634
|
[
"VisIt"
] |
9412657898363c93af9ad61d88a2eaf48c2c2f8ab7a529fecb84bbcd1cd50acd
|
# -*- coding: utf-8 -*-
'''
CuBIC is a statistical method for the detection of higher order of
correlations in parallel spike trains based on the analysis of the
cumulants of the population count.
Given a list sts of SpikeTrains, the analysis comprises the following
steps:
1) compute the population histogram (PSTH) with the desired bin size
>>> binsize = 5 * pq.ms
>>> pop_count = elephant.statistics.time_histogram(sts, binsize)
2) apply CuBIC to the population count
>>> alpha = 0.05 # significance level of the tests used
>>> xi_hat, p, kappa, test_aborted = cubic(pop_count, ximax=100, alpha=alpha)
:copyright: Copyright 2016 by the Elephant team, see AUTHORS.txt.
:license: BSD, see LICENSE.txt for details.
'''
# -*- coding: utf-8 -*-
from __future__ import division
import scipy.stats
import scipy.special
import math
import warnings
# Based on matlab code by Benjamin Staude
# Adaptation to python by Pietro Quaglio and Emiliano Torre
def cubic(data, ximax=100, alpha=0.05):
'''
Performs the CuBIC analysis [1] on a population histogram, calculated from
a population of spiking neurons.
The null hypothesis :math:`H_0: k_3(data)<=k^*_{3,\\xi}` is iteratively
tested with increasing correlation order :math:`\\xi` (corresponding to
variable xi) until it is possible to accept, with a significance level alpha,
that :math:`\\hat{\\xi}` (corresponding to variable xi_hat) is the minimum
order of correlation necessary to explain the third cumulant
:math:`k_3(data)`.
:math:`k^*_{3,\\xi}` is the maximized third cumulant, supposing a Compound
Poisson Process (CPP) model for correlated spike trains (see [1])
with maximum order of correlation equal to :math:`\\xi`.
Parameters
----------
data : neo.AnalogSignal
The population histogram (count of spikes per time bin) of the entire
population of neurons.
    ximax : int
        The maximum number of iterations of the hypothesis test:
        if it is not possible to compute :math:`\\hat{\\xi}` within ximax
        iterations, the CuBIC procedure is aborted.
        Default: 100
alpha : float
        The significance level of the hypothesis tests performed.
Default: 0.05
Returns
-------
xi_hat : int
The minimum correlation order estimated by CuBIC, necessary to
explain the value of the third cumulant calculated from the population.
    p : list
        The ordered list of all the p-values of the hypothesis tests that have
        been performed. If the maximum number of iterations ximax is reached,
        the procedure stops and test_aborted is set to True.
kappa : list
The list of the first three cumulants of the data.
    test_aborted : bool
        Whether the test was aborted because it reached the maximum number of
        iterations ximax
References
----------
    [1] Staude, Rotter, Gruen, (2009) J. Comp. Neurosci
'''
    # alpha is in the interval [0,1]
if alpha < 0 or alpha > 1:
raise ValueError(
'the significance level alpha (= %s) has to be in [0,1]' % alpha)
    if not isinstance(ximax, int) or ximax < 0:
        raise ValueError(
            'The maximum number of iterations ximax (= %s) has to be a'
            ' positive integer' % ximax)
    # strip units if the data carry them (e.g. a neo.AnalogSignal / quantity)
try:
data = data.magnitude
except AttributeError:
pass
L = len(data)
# compute first three cumulants
kappa = _kstat(data)
xi_hat = 1
xi = 1
pval = 0.
p = []
test_aborted = False
# compute xi_hat iteratively
while pval < alpha:
xi_hat = xi
if xi > ximax:
warnings.warn('Test aborted, xihat= %i > ximax= %i' % (xi, ximax))
test_aborted = True
break
# compute p-value
pval = _H03xi(kappa, xi, L)
p.append(pval)
xi = xi + 1
return xi_hat, p, kappa, test_aborted
def _H03xi(kappa, xi, L):
'''
Computes the p_value for testing the :math:`H_0: k_3(data)<=k^*_{3,\\xi}`
hypothesis of CuBIC in the stationary rate version
Parameters
-----
    kappa : list
        The first three cumulants of the population of spike trains
    xi : int
        The maximum order of correlation :math:`\\xi` supposed in the
        hypothesis for which the p-value of :math:`H_0` is computed
    L : float
        The length of the original population histogram on which the CuBIC
        analysis is performed
Returns
-----
p : float
The p-value of the hypothesis tests
'''
    # Check the order condition of the cumulants necessary to perform CuBIC
    if kappa[1] < kappa[0]:
        raise ValueError(
            'H_0 can not be tested: '
            'kappa(2) = %f < %f = kappa(1)!!!' % (kappa[1], kappa[0]))
else:
# computation of the maximized cumulants
kstar = [_kappamstar(kappa[:2], i, xi) for i in range(2, 7)]
k3star = kstar[1]
# variance of third cumulant (from Stuart & Ord)
sigmak3star = math.sqrt(
kstar[4] / L + 9 * (kstar[2] * kstar[0] + kstar[1] ** 2) /
(L - 1) + 6 * L * kstar[0] ** 3 / ((L - 1) * (L - 2)))
        # computation of the p-value (the third cumulant is supposed to
        # be Gaussian distributed)
p = 1 - scipy.stats.norm(k3star, sigmak3star).cdf(kappa[2])
return p
def _kappamstar(kappa, m, xi):
'''
Computes maximized cumulant of order m
Parameters
-----
kappa : list
The first two cumulants of the data
xi : int
The :math:`\\xi` for which is computed the p value of :math:`H_0`
m : float
The order of the cumulant
Returns
-----
k_out : list
The maximized cumulant of order m
'''
if xi == 1:
kappa_out = kappa[1]
else:
kappa_out = \
(kappa[1] * (xi ** (m - 1) - 1) -
kappa[0] * (xi ** (m - 1) - xi)) / (xi - 1)
return kappa_out
def _kstat(data):
'''
    Compute first three cumulants of a population count of a population of
    spiking neurons.
See http://mathworld.wolfram.com/k-Statistic.html
Parameters
-----
    data : numpy.array
        The population histogram of the population on which the cumulants
        are computed
Returns
-----
kappa : list
The first three cumulants of the population count
'''
L = len(data)
if L == 0:
raise ValueError('The input data must be a non-empty array')
S = [(data ** r).sum() for r in range(1, 4)]
kappa = []
kappa.append(S[0] / float(L))
kappa.append((L * S[1] - S[0] ** 2) / (L * (L - 1)))
kappa.append(
(2 * S[0] ** 3 - 3 * L * S[0] * S[1] + L ** 2 * S[2]) / (
L * (L - 1) * (L - 2)))
return kappa
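# Minimal end-to-end sketch on synthetic data (assumes only numpy; a
# negative-binomial count is used so the variance exceeds the mean, since
# _H03xi raises ValueError when kappa[1] < kappa[0], i.e. variance < mean):
#
#     >>> import numpy as np
#     >>> np.random.seed(0)
#     >>> pop_count = np.random.negative_binomial(5, 0.5, size=10000)
#     >>> xi_hat, p, kappa, aborted = cubic(pop_count, ximax=100, alpha=0.05)
#     >>> xi_hat >= 1 and len(kappa) == 3
#     True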
|
pietroquaglio/elephant
|
elephant/cubic.py
|
Python
|
bsd-3-clause
| 6,870
|
[
"Gaussian"
] |
e51618bfdb517f90c537195a4ba07c0f1200a73d26fe85209bb13cc8435a7c4c
|
# -*- coding: utf-8 -*-
"""
=================================================
==================== Common Configuration
================================================= """
# Which will be the default atlas?
# Options: Swanson, PaxinosWatson
ATLAS = "Swanson"
# Should multithreading be used when matching?
# If True, the debug logs will not be in sequential order and PDB debugging will break
MULTITHREAD = True
# We will attempt to reduce the image width to this size but maintain aspect ratio
# Default: 200
RESIZE_WIDTH = 200
"""
=================================================
==================== Algorithm Selection
================================================= """
# Should we use our implemented RANSAC?
# If false, OpenCV RANSAC is used
NEW_RANSAC = False
"""
=================================================
==================== Matching
================================================= """
# Neighbor distance ratio for the ratio test as per Lowe's SIFT paper
DISTANCE_RATIO = 0.8
# Number of minimum good matches needed to compare descriptors
# You need at least 4 to be able to estimate a homography
MIN_MATCH_COUNT = 4
# Should feature matching use the FLANN KDTree approach?
# If false, feature matching will be with BruteForce
MATCH_WITH_FLANN = True
# The color of the rectangle overlayed in the matching
MATCH_RECT_COLOR = (0, 255, 255)
"""
=================================================
==================== Match Discarding
================================================= """
# Homography matrices whose determinant has an absolute value below this threshold will be discarded
# Default: 0.001
HOMOGRAPHY_DETERMINANT_THRESHOLD = 1e-4
# When transforming corners using homography, should we allow non-convex shapes?
ALLOW_NON_CONVEX_CORNERS = False
# Moments larger than the threshold will be discarded
# Default: 2
HU_DISTANCE_THRESHOLD = 2e100
"""
=================================================
==================== RANSAC
================================================= """
# The higher the reprojection threshold, the more matches are counted as inliers
RANSAC_REPROJ_TRESHHOLD = 10
# How many RANSAC iterations should we perform?
# Default: 2000
RANSAC_MAX_ITERS = 2000
# Only OpenCV's RANSAC uses this
# Default: 0.99
RANSAC_CONFIDENCE = 0.99
"""
=================================================
==================== SIFT
================================================= """
# The larger the threshold, the fewer features are produced by the detector.
# Default: 0.08
SIFT_CONTRAST_THRESHOLD = 0.05
# The larger the threshold, the more features are retained
# Default: 30
SIFT_EDGE_THRESHOLD = 100
# Sigma of Gaussian used by SIFT
# Reduce for images captured by a weak camera with soft lenses
# Default: 2
SIFT_SIGMA = 2
# Number of SIFT features to be extracted
# If 0, SIFT will decide the best number of features
SIFT_FEATURES = 0
# Larger means more features
SIFT_OCTAVE_LAYERS = 2
# Should we use ASIFT instead of regular SIFT?
USE_AFFINE = True
ASIFT_START = 0
ASIFT_END = 180
ASIFT_INC = 20.0
"""
=================================================
==================== Prototype User Interface
================================================= """
# Should the UI have region warping options?
UI_WARP = False
# Should the UI have region angle change options?
UI_ANGLE = False
# Should the UI show the region keypoints?
UI_SHOW_KP = True
# Should we save the region you select in the program?
UI_SAVE_REGION = False
# Should we save the results when they are selected?
UI_SAVE_RESULTS = True
"""
=================================================
==================== Logging
================================================= """
# The formatting string that will be used by the loggers
LOGGER_FORMAT_STRING = (
#u'[{record.time:%H:%M}] '
u'[{record.channel}:] {record.level_name}: {record.message}'
)
"""
=================================================
==================== FLANN-based Matcher
================================================= """
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_PARAMS = dict(algorithm=FLANN_INDEX_KDTREE,
trees=5)
FLANN_SEARCH_PARAMS = dict(checks=200)
"""
=================================================
==================== Atlas Automatic Set-Up
================================================= """
if (ATLAS.casefold() == "Swanson".casefold()):
NISSL_DEFAULT_FILE = 'dataset/testing/region-68.jpg'
NISSL_DIR = "dataset/atlas_swanson"
NISSL_PREFIX = "Level-"
NISSL_DIGITS = 2
NISSL_COUNT = 73
NISSL_EXT = ".jpg"
elif (ATLAS.casefold() == "PaxinosWatson".casefold()):
NISSL_DEFAULT_FILE = 'dataset/atlas_pw/RBSC7-060.jpg'
NISSL_DIR = 'dataset/atlas_pw'
NISSL_PREFIX = 'RBSC7-'
NISSL_DIGITS = 3
NISSL_COUNT = 161
NISSL_EXT = ".jpg"
else:
raise Exception("Atlas: " + ATLAS + " is not supported")
|
DeveloperJose/Vision-Rat-Brain
|
feature_matching_v2/config.py
|
Python
|
mit
| 4,836
|
[
"Gaussian"
] |
4ffac9bc2044d19fdbea092d1a0f48f1dbd9a8a89f48514539a65bf6d98f5164
|
# -*- coding: utf-8 -*-
"""Functions for inducing random sub-graphs."""
import bisect
import logging
import random
from operator import itemgetter
from typing import Any, Iterable, Mapping, Optional, Set, Tuple
from ..utils import remove_isolated_nodes
from ...graph import BELGraph
from ...pipeline import transformation
from ...utils import update_metadata
from ....dsl import BaseEntity
__all__ = [
"get_graph_with_random_edges",
"get_random_node",
"get_random_subgraph",
]
logger = logging.getLogger(__name__)
def _random_edge_iterator(graph: BELGraph, n_edges: int) -> Iterable[Tuple[BaseEntity, BaseEntity, int, Mapping]]:
"""Get a random set of edges from the graph and randomly samples a key from each.
:param graph: A BEL graph
:param n_edges: Number of edges to randomly select from the given graph
"""
edges = list(graph.edges())
edge_sample = random.sample(edges, n_edges)
for u, v in edge_sample:
keys = list(graph[u][v])
k = random.choice(keys)
yield u, v, k, graph[u][v][k]
@transformation
def get_graph_with_random_edges(graph: BELGraph, n_edges: int) -> BELGraph:
"""Build a new graph from a seeding of edges.
:param graph: A BEL graph
:param n_edges: Number of edges to randomly select from the given graph
"""
rv = graph.child()
rv.add_edges_from(_random_edge_iterator(graph, n_edges))
return rv
#: How many edges should be sampled from a graph that's still reasonable to display
SAMPLE_RANDOM_EDGE_COUNT = 250
#: How many edges should be sampled as "seed" edges
SAMPLE_RANDOM_EDGE_SEED_COUNT = 5
class WeightedRandomGenerator:
"""A weighted random number generator.
Adapted from: https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python
"""
def __init__(self, values, weights):
"""Build a weighted random generator.
:param Any values: A sequence corresponding to the weights
:param weights: Weights for each. Should all be positive, but not necessarily normalized.
"""
self.values = values
self.totals = []
weight_total = 0
for weight in weights:
weight_total += weight
self.totals.append(weight_total)
@property
def total(self):
"""Get the total weight stored."""
return self.totals[-1]
def next_index(self) -> int:
"""Get a random index."""
return bisect.bisect_right(self.totals, random.random() * self.total)
def next(self) -> Any:
"""Get a random value."""
return self.values[self.next_index()]
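# Illustrative use of WeightedRandomGenerator (values here are hypothetical):
#
#     >>> gen = WeightedRandomGenerator(['a', 'b'], [1, 3])
#     >>> gen.total
#     4
#     >>> gen.next() in {'a', 'b'}
#     True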
def get_random_node(
graph,
node_blacklist: Set[BaseEntity],
invert_degrees: Optional[bool] = None,
) -> Optional[BaseEntity]:
"""Choose a node from the graph with probabilities based on their degrees.
:type graph: networkx.Graph
:param node_blacklist: Nodes to filter out
:param invert_degrees: Should the degrees be inverted? Defaults to true.
"""
try:
nodes, degrees = zip(
*(
(node, degree)
for node, degree in sorted(graph.degree(), key=itemgetter(1))
if node not in node_blacklist
),
)
except ValueError: # something wrong with graph, probably no elements in graph.degree_iter
return
if invert_degrees is None or invert_degrees:
# More likely to choose low degree nodes to explore, so don't make hubs
degrees = [1 / degree for degree in degrees]
wrg = WeightedRandomGenerator(nodes, degrees)
return wrg.next() # noqa: B305
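# Note (illustrative): with invert_degrees left as None (treated as True), a
# degree-1 node gets weight 1.0 while a degree-10 node gets weight 0.1, so
# low-degree nodes are far more likely to be drawn, which keeps random walks
# away from hubs.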
def _helper(
result,
graph,
number_edges_remaining: int,
node_blacklist: Set[BaseEntity],
invert_degrees: Optional[bool] = None,
) -> None:
"""Help build a random graph.
:type result: networkx.Graph
:type graph: networkx.Graph
"""
original_node_count = graph.number_of_nodes()
logger.debug("adding remaining %d edges", number_edges_remaining)
for _ in range(number_edges_remaining):
source, possible_step_nodes, c = None, set(), 0
while not source or not possible_step_nodes:
source = get_random_node(result, node_blacklist, invert_degrees=invert_degrees)
c += 1
if c >= original_node_count:
logger.warning("infinite loop happening")
logger.warning("source: %s", source)
logger.warning("no grow: %s", node_blacklist)
                return  # Happens after exhausting the connected components. Try increasing the number of seed edges
            if source is None:
                continue
            # Only keep targets in the original graph that aren't in the result graph
            possible_step_nodes = set(graph[source]) - set(result[source])
            if not possible_step_nodes:
                # there aren't any possible nodes to step to, so try growing from somewhere else
                node_blacklist.add(source)
        step_node = random.choice(list(possible_step_nodes))
# it's not really a big deal which, but it might be possible to weight this by the utility of edges later
key, attr_dict = random.choice(list(graph[source][step_node].items()))
result.add_edge(source, step_node, key=key, **attr_dict)
@transformation
def get_random_subgraph(
graph: BELGraph,
number_edges: Optional[int] = None,
number_seed_edges: Optional[int] = None,
seed: Optional[int] = None,
invert_degrees: Optional[bool] = None,
) -> BELGraph:
"""Generate a random subgraph based on weighted random walks from random seed edges.
:type graph: pybel.BELGraph graph
:param number_edges: Maximum number of edges. Defaults to
:data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250).
:param number_seed_edges: Number of nodes to start with (which likely results in different components
in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5).
:param seed: A seed for the random state
:param invert_degrees: Should the degrees be inverted? Defaults to true.
"""
if number_edges is None:
number_edges = SAMPLE_RANDOM_EDGE_COUNT
if number_seed_edges is None:
number_seed_edges = SAMPLE_RANDOM_EDGE_SEED_COUNT
if seed is not None:
random.seed(seed)
# Check if graph will sample full graph, and just return it if it would
if graph.number_of_edges() <= number_edges:
logger.info("sampled full graph")
return graph.copy()
logger.debug(
"getting random sub-graph with %d seed edges, %d final edges, and seed=%s",
number_seed_edges,
number_edges,
seed,
)
# Get initial graph with `number_seed_edges` edges
result = get_graph_with_random_edges(graph, number_seed_edges)
number_edges_remaining = number_edges - result.number_of_edges()
_helper(
result,
graph,
number_edges_remaining,
node_blacklist=set(), # This is the set of nodes that should no longer be chosen to grow from
invert_degrees=invert_degrees,
)
logger.debug("removing isolated nodes")
remove_isolated_nodes(result)
return result
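# Hypothetical usage sketch (assumes a populated BELGraph named `graph`):
#
#     >>> sub = get_random_subgraph(graph, number_edges=250, seed=42)
#     >>> sub.number_of_edges() <= 250
#     True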
|
pybel/pybel
|
src/pybel/struct/mutation/induction/random_subgraph.py
|
Python
|
mit
| 7,301
|
[
"Pybel"
] |
852b7de4400d985e4efb49decc25ad7e6eeb95e4d95cf6f7a40c618c46beb5ae
|
import unittest
import sam.sam_handler
import os
import subprocess
import Utility
import shutil
import tempfile
import sam_test_case
import config.settings as settings
settings.setup_logging()
# Simulation Configs
SIM_DIR = os.path.dirname(os.path.realpath(__file__)) + os.sep + "simulations"
SIM_DATA_FILENAME_PREFIX = "umberjack_unittest"
SIM_DATA_DIR = SIM_DIR + os.sep + "data" + os.sep + SIM_DATA_FILENAME_PREFIX
SIM_PIPELINE_PY = os.path.dirname(os.path.realpath(__file__)) + os.sep + "simulations" + os.sep + "sim_pipeline.py"
# INDELible dN/dS values that INDELible is aiming to simulate
INDELIBLE_DNDS_FILENAME = SIM_DATA_DIR + os.sep + "mixed" + os.sep + SIM_DATA_FILENAME_PREFIX + ".mixed.rates.csv"
# Sliding Window configs
POPN_CONSENSUS_FASTA = SIM_DATA_DIR + os.sep + "mixed" + os.sep + SIM_DATA_FILENAME_PREFIX + ".mixed.consensus.fasta"
REF = "consensus"
# Sam file for ART reads alignments against consensus of INDELible population
SIM_SAM = SIM_DATA_DIR + os.sep + "mixed" + os.sep + "aln" + os.sep + SIM_DATA_FILENAME_PREFIX + ".mixed.reads.consensus.bwa.sort.query"
OUT_DIR = SIM_DIR + os.sep + "out" + SIM_DATA_FILENAME_PREFIX + os.sep + REF
MAPQ_CUTOFF = 20 # alignment quality cutoff
MAX_PROP_N = 0.5 # maximum proportion of N bases in MSA-aligned sequence
READ_QUAL_CUTOFF = 20 # Phred quality score cutoff [0,40]
MIN_WINDOW_BREADTH_COV_FRACTION = 0.5
TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + os.sep + "out"
TEST_MERGE_FASTQ = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data" + os.sep + "test.merge.fq"
TEST_PAIR_SELECTION_SAM = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data" + os.sep + "test.pairselection.sam"
TEST_PAIR_SELECTION_REMDUP_SAM = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data" + os.sep + "test.pairselection.remdup.sam"
TEST_PAIR_SELECTION_TARGET_REF = "targetref"
EXPECTED_TEST_PAIR_SELECTION_FULL_MSA_FASTA = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data" + os.sep + "test.pairselection.msa.fasta"
EXPECTED_TEST_PAIR_SELECTION_REMDUP_FULL_MSA_FASTA = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data" + os.sep + "test.pairselection.msa.remdup.fasta"
class TestSamHandler(unittest.TestCase):
def setUp(self):
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR)
os.makedirs(TEST_DIR)
self.merge_testcases = sam_test_case.SamTestCase.parse_fastq(TEST_MERGE_FASTQ)
self.TMPSAM_REF1 = "ref1"
self.TMPSAM_REF1_LEN = 4
self.TMPSAM_REF2 = "ref2"
self.TMPSAM_REF2_LEN = 400
self.TMPSAM_REF_DNE = "!@#$"
self.TMPSAM_REF_DNE_LEN = None
# Unknown Sort Order Sam
self.tmpsam_unsort = tempfile.NamedTemporaryFile('w', suffix=".nosort.sam", dir=TEST_DIR, delete=False)
self.tmpsam_unsort.write("@HD\tVN:0.0\tSO:unknown\n")
self.tmpsam_unsort.write("@PG\tID:fakeid\tPN:programname\tCL:fake command line\tDS:desc\tVN:version0\n")
self.tmpsam_unsort.write("@CO\trandom comment1\n")
self.tmpsam_unsort.write("@CO\trandom comment2\n")
self.tmpsam_unsort.write("@SQ\tSN:{}\tLN:{}\tSP:fake species\n".format(self.TMPSAM_REF1, self.TMPSAM_REF1_LEN))
self.tmpsam_unsort.write("@SQ\tSN:{}\tLN:{}\tSP:fake species\n".format(self.TMPSAM_REF2, self.TMPSAM_REF2_LEN))
self.tmpsam_unsort.write("read1\t4\t*\t0\t49\t*\t*\t*\t0\tACGT\tHHHH")
self.tmpsam_unsort.flush() # flush python buffer
os.fsync(self.tmpsam_unsort.file.fileno()) # flush os buffer to disk
self.tmpsam_unsort.close()
# Queryname Sort Order Sam
self.tmpsam_query = tempfile.NamedTemporaryFile('w', suffix=".sort.query.sam", dir=TEST_DIR, delete=False)
self.tmpsam_query.write("@HD\tVN:0.0\tSO:queryname\n")
self.tmpsam_query.write("@PG\tID:fakeid\tPN:programname\tCL:fake command line\tDS:desc\tVN:version0\n")
self.tmpsam_query.write("@CO\trandom comment1\n")
self.tmpsam_query.write("@CO\trandom comment2\n")
self.tmpsam_query.write("@SQ\tSN:ref1\tLN:4\tSP:fake species\n")
self.tmpsam_query.write("@SQ\tSN:ref2\tLN:400\tSP:fake species\n")
self.tmpsam_query.write("read1\t4\t*\t0\t49\t*\t*\t*\t0\tACGT\tHHHH")
self.tmpsam_query.flush() # flush python buffer
os.fsync(self.tmpsam_query.file.fileno()) # flush os buffer to disk
self.tmpsam_query.close()
# No header sam
self.tmpsam_noheader = tempfile.NamedTemporaryFile('w', suffix=".noheader.sam", dir=TEST_DIR, delete=False)
self.tmpsam_noheader.write("read1\t4\t*\t0\t49\t*\t*\t*\t0\tACGT\tHHHH")
self.tmpsam_noheader.flush() # flush python buffer
os.fsync(self.tmpsam_noheader.file.fileno()) # flush os buffer to disk
self.tmpsam_noheader.close()
def test_get_reflen(self):
"""
Tests that header is parsed for ref len properly.
"""
# Test returns reference length or None if not found
actual_ref1_len = sam.sam_handler.get_reflen(sam_filename=self.tmpsam_unsort.name, ref=self.TMPSAM_REF1)
self.assertEqual(self.TMPSAM_REF1_LEN, actual_ref1_len, "Expected {} but got {} for {}".format(self.TMPSAM_REF1_LEN, actual_ref1_len, self.TMPSAM_REF1))
actual_ref2_len = sam.sam_handler.get_reflen(sam_filename=self.tmpsam_unsort.name, ref=self.TMPSAM_REF2)
self.assertEqual(self.TMPSAM_REF2_LEN, actual_ref2_len, "Expected {} but got {} for {}".format(self.TMPSAM_REF2_LEN, actual_ref2_len, self.TMPSAM_REF2))
actual_ref_dne_len = sam.sam_handler.get_reflen(sam_filename=self.tmpsam_unsort.name, ref=self.TMPSAM_REF_DNE)
self.assertEqual(self.TMPSAM_REF_DNE_LEN, actual_ref_dne_len, "Expected {} but got {} for {}".format(self.TMPSAM_REF_DNE_LEN, actual_ref_dne_len, self.TMPSAM_REF_DNE))
# Test that a sam with no header returns None for reference length
actual_ref_dne_len = sam.sam_handler.get_reflen(sam_filename=self.tmpsam_noheader.name, ref=self.TMPSAM_REF_DNE)
self.assertEqual(actual_ref_dne_len, self.TMPSAM_REF_DNE_LEN, "Expected {} but got {} for {}".format(self.TMPSAM_REF_DNE_LEN, actual_ref_dne_len, self.TMPSAM_REF_DNE))
def test_is_query_sort(self):
"""
Test that header parsed properly for sam order
"""
actual_unknown_sort = sam.sam_handler.is_query_sort(sam_filename=self.tmpsam_unsort.name)
self.assertEqual(False, actual_unknown_sort, "Expected False but got {} for is_query_sort of unknown sort order".format(actual_unknown_sort))
actual_query_sort = sam.sam_handler.is_query_sort(sam_filename=self.tmpsam_query.name)
self.assertEqual(True, actual_query_sort, "Expected True but got {} for is_query_sort of queryname sort order".format(actual_query_sort))
# Test that a sam with no header returns False for query sorted
actual_noheader = sam.sam_handler.is_query_sort(sam_filename=self.tmpsam_noheader.name)
self.assertEqual(False, actual_noheader, "Expected False but got {} for is_query_sort of sam with no header".format(actual_noheader))
@staticmethod
def diff_fasta_line(fasta1, fasta2):
"""
Checks if fasta contents are the same. Order matters. Header contents matter.
Line endings don't matter.
Assumes both fastas have each sequence on 1 line.
:return: newline separated concatenation of the line in fasta1 and fasta2 that doesn't match or None if all lines match
:rtype: str
"""
with open(fasta1, 'rU') as fh1, open(fasta2, 'rU') as fh2:
i = -1
for i, line1 in enumerate(fh1):
try:
line2 = fh2.next()
if line1.rstrip() != line2.rstrip():
return "line " + str(i+1) + ":\n" + line1 + "\n" + line2
except StopIteration: # in case fh2 is already at eof
return "line " + str(i+1) + ":\n" + line1 + "\n<eof>"
try: # check if fh2 has more lines than fh1
line2 = fh2.next()
if line2:
i += 1
return "line " + str(i+1) + ":\n<eof>\n" + line2
except StopIteration:
pass # in case fh2 is already at eof
return None
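    # Illustrative behaviour of diff_fasta_line (hypothetical files): if
    # a.fasta and b.fasta first differ at line 3, then
    # diff_fasta_line("a.fasta", "b.fasta") returns
    # "line 3:\n<line from a.fasta>\n<line from b.fasta>";
    # identical files return None.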
def test_create_msa_slice_from_sam_pair_selection(self):
"""
Tests that the sam_handler.create_msa_slice_from_sam() is iterating through the records
and selecting the correct records for pairing.
- Test Missing Mates
- Test Unmapped Mates
- Test Mates Mapped to wrong ref
- Test Pair Mapped to Wrong Ref
- Test Low Map Qual Threshold:
- Test Secondary, Chimeric Alignments:
CIGAR: Should be tested in sam_record
- test H, S, X, =, M, P
"""
ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA = TEST_DIR + os.sep + os.path.basename(TEST_PAIR_SELECTION_SAM).replace(".sam", ".msa.fasta")
# Test that the pairs are selected correctly. We don't care about slices, breadth thresholds or N's or masking stop codons here.
# But we do care about mapping quality and target references.
actual_written = sam.sam_handler.create_msa_slice_from_sam(sam_filename=TEST_PAIR_SELECTION_SAM,
ref=TEST_PAIR_SELECTION_TARGET_REF,
out_fasta_filename=ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF, max_prop_N=1.0,
breadth_thresh=0, start_pos=0, end_pos=0,
do_insert_wrt_ref=False, do_mask_stop_codon=False)
self.assertTrue(os.path.exists(ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA) and os.path.getsize(ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA) > 0,
ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA + " doesn't exist or is empty")
diff_line = TestSamHandler.diff_fasta_line(EXPECTED_TEST_PAIR_SELECTION_FULL_MSA_FASTA, ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA)
self.assertIsNone(diff_line,
"Expected full msa fasta " + EXPECTED_TEST_PAIR_SELECTION_FULL_MSA_FASTA + " different than " +
ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA + ":\n" + str(diff_line))
expected_written = Utility.get_total_seq_from_fasta(EXPECTED_TEST_PAIR_SELECTION_FULL_MSA_FASTA)
self.assertEqual(expected_written, actual_written,
"Expect total written seq {} but got {} from {}".format(expected_written, actual_written, ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA))
def test_create_msa_slice_from_sam_dup(self):
"""
Tests that the sam_handler.create_msa_slice_from_sam() is iterating through non-duplicate records
and selecting the correct records for pairing.
"""
ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA = TEST_DIR + os.sep + os.path.basename(TEST_PAIR_SELECTION_REMDUP_SAM).replace(".sam", ".msa.fasta")
ACTUAL_TEST_PAIR_SELECTION_DUP_TSV = TEST_DIR + os.sep + os.path.basename(TEST_PAIR_SELECTION_REMDUP_SAM).replace(".sam", ".tsv")
# Test that the pairs are selected correctly. We don't care about slices, breadth thresholds or N's or masking stop codons here.
# But we do care about mapping quality and target references.
actual_written = sam.sam_handler.create_msa_slice_from_sam(sam_filename=TEST_PAIR_SELECTION_REMDUP_SAM,
ref=TEST_PAIR_SELECTION_TARGET_REF,
out_fasta_filename=ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF, max_prop_N=1.0,
breadth_thresh=0, start_pos=0, end_pos=0,
do_insert_wrt_ref=False, do_mask_stop_codon=False,
do_remove_dup=True,
out_dup_tsv_filename=ACTUAL_TEST_PAIR_SELECTION_DUP_TSV)
self.assertTrue(os.path.exists(ACTUAL_TEST_PAIR_SELECTION_DUP_TSV) and os.path.getsize(ACTUAL_TEST_PAIR_SELECTION_DUP_TSV)>0,
ACTUAL_TEST_PAIR_SELECTION_DUP_TSV + " doesn't exist or is empty")
self.assertTrue(os.path.exists(ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA) and os.path.getsize(ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA) > 0,
ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA + " doesn't exist or is empty")
diff_line = TestSamHandler.diff_fasta_line(EXPECTED_TEST_PAIR_SELECTION_REMDUP_FULL_MSA_FASTA, ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA)
self.assertIsNone(diff_line,
"Expected full msa fasta " + EXPECTED_TEST_PAIR_SELECTION_REMDUP_FULL_MSA_FASTA + " different than " +
ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA + ":\n" + str(diff_line))
expected_written = Utility.get_total_seq_from_fasta(EXPECTED_TEST_PAIR_SELECTION_REMDUP_FULL_MSA_FASTA)
self.assertEqual(expected_written, actual_written,
"Expect total written seq {} but got {} from {}".format(expected_written, actual_written, ACTUAL_TEST_PAIR_SELECTION_FULL_MSA_FASTA))
def test_create_msa_slice_from_sam_unsorted(self):
"""
Tests that the sam_handler.create_msa_slice_from_sam() requires a queryname sorted sam
"""
actual_tmpsam_unsort_msa_fasta = TEST_DIR + os.sep + os.path.basename(self.tmpsam_unsort.name).replace(".sam", ".msa.fasta")
self.assertRaises(ValueError, sam.sam_handler.create_msa_slice_from_sam,
sam_filename=self.tmpsam_unsort.name,
ref=TEST_PAIR_SELECTION_TARGET_REF,
out_fasta_filename=actual_tmpsam_unsort_msa_fasta,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF,
max_prop_N=1.0,
breadth_thresh=0,
start_pos=None, end_pos=None,
do_insert_wrt_ref=False,
do_mask_stop_codon=False)
def __write_sam_testcase(self, testcases, samfile):
"""
Writes the sam records to file for the list of SamTestCase
"""
# Assume each testcase has the same reference to length dict SamTestCase.ref2len
with open(samfile, 'w') as fh_out:
fh_out.write("@HD\tVN:0.0\tSO:queryname\n")
for ref, ref_len in testcases[0].ref2len.iteritems():
fh_out.write("@SQ\tSN:{}\tLN:{}\n".format(ref, ref_len))
for testcase in testcases:
lines = testcase.create_sam_lines()
fh_out.write(lines)
def test_create_msa_slice_from_sam_maxpropN(self):
"""
Tests that the sam_handler.create_msa_slice_from_sam() filters out sequences that have too many N's or gaps
"""
# We only care that the sequences are filtered by fraction of N's.
# We don't care about breadth thresholds or slicing.
ACTUAL_TEST_MERGE_FULL_MSA_FASTA = TEST_DIR + os.sep + os.path.basename(TEST_MERGE_FASTQ).replace(".fq", ".msa.fasta")
TEST_MERGE_SAM = TEST_DIR + os.sep + os.path.basename(TEST_MERGE_FASTQ).replace(".fq", ".sam")
self.__write_sam_testcase(self.merge_testcases, TEST_MERGE_SAM)
self.assertTrue(os.path.exists(TEST_MERGE_SAM) and os.path.getsize(TEST_MERGE_SAM) > 0,
"Expected test case sam for merging records " + TEST_MERGE_SAM + " does not exist or is empty")
actual_written = sam.sam_handler.create_msa_slice_from_sam(sam_filename=TEST_MERGE_SAM,
ref=self.merge_testcases[0].target_ref,
out_fasta_filename=ACTUAL_TEST_MERGE_FULL_MSA_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF,
max_prop_N=MAX_PROP_N, breadth_thresh=0,
start_pos=0, end_pos=0, do_insert_wrt_ref=True,
do_mask_stop_codon=True)
self.assertTrue(os.path.exists(ACTUAL_TEST_MERGE_FULL_MSA_FASTA) and os.path.getsize(ACTUAL_TEST_MERGE_FULL_MSA_FASTA) > 0,
ACTUAL_TEST_MERGE_FULL_MSA_FASTA + " doesn't exist or is empty")
actual_header2seq = Utility.get_seq_dict(ACTUAL_TEST_MERGE_FULL_MSA_FASTA)
expected_written = 0
for testcase in self.merge_testcases:
expected_seq, expected_qual = testcase.get_sliced_merged_read(slice_start_pos_wrt_ref_1based=None, slice_end_pos_wrt_ref_1based=None,
do_pad_wrt_slice=True, do_insert_wrt_ref=True, do_mask_stop_codon=True)
actual_seq = actual_header2seq.get(testcase.read_name, None)
if expected_seq.count("N") / float(len(expected_seq)) > MAX_PROP_N:
self.assertIsNone(actual_seq,
"Expect read " + testcase.read_name + " should not be in " + ACTUAL_TEST_MERGE_FULL_MSA_FASTA)
else:
expected_written += 1
self.assertEqual(expected_seq, actual_seq,
"Expected {} but got {} for testcase {}".format(expected_seq, actual_seq, testcase.read_name))
self.assertEqual(expected_written, actual_written,
"Expect total written seq {} but got {} from {}".format(expected_written, actual_written, ACTUAL_TEST_MERGE_FULL_MSA_FASTA))
def test_create_msa_slice_from_sam_slice(self):
"""
Tests that the sam_handler.create_msa_slice_from_sam() slices properly
"""
ACTUAL_TEST_MERGE_FULL_MSA_FASTA = TEST_DIR + os.sep + os.path.basename(TEST_MERGE_FASTQ).replace(".fq", ".msa.fasta")
TEST_MERGE_SAM = TEST_DIR + os.sep + os.path.basename(TEST_MERGE_FASTQ).replace(".fq", ".sam")
self.__write_sam_testcase(self.merge_testcases, TEST_MERGE_SAM)
self.assertTrue(os.path.exists(TEST_MERGE_SAM) and os.path.getsize(TEST_MERGE_SAM) > 0,
"Expected test case sam for merging records " + TEST_MERGE_SAM + " does not exist or is empty")
actual_written = sam.sam_handler.create_msa_slice_from_sam(sam_filename=TEST_MERGE_SAM,
ref=self.merge_testcases[0].target_ref,
out_fasta_filename=ACTUAL_TEST_MERGE_FULL_MSA_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF,
max_prop_N=1.0, breadth_thresh=MIN_WINDOW_BREADTH_COV_FRACTION,
start_pos=self.merge_testcases[0].slice_start,
end_pos=self.merge_testcases[0].slice_end, do_insert_wrt_ref=True,
do_mask_stop_codon=True)
self.assertTrue(os.path.exists(ACTUAL_TEST_MERGE_FULL_MSA_FASTA) and os.path.getsize(ACTUAL_TEST_MERGE_FULL_MSA_FASTA) > 0,
ACTUAL_TEST_MERGE_FULL_MSA_FASTA + " doesn't exist or is empty")
actual_header2seq = Utility.get_seq_dict(ACTUAL_TEST_MERGE_FULL_MSA_FASTA)
expected_written = 0
for testcase in self.merge_testcases:
expected_seq, expected_qual = testcase.get_sliced_merged_read(slice_start_pos_wrt_ref_1based=testcase.slice_start,
slice_end_pos_wrt_ref_1based=testcase.slice_end,
do_pad_wrt_slice=True, do_insert_wrt_ref=True, do_mask_stop_codon=True)
actual_seq = actual_header2seq.get(testcase.read_name, None)
slice_len = self.merge_testcases[0].slice_end - self.merge_testcases[0].slice_start + 1
if float(expected_seq.count("N") + expected_seq.count("-")) / slice_len > (1.0- MIN_WINDOW_BREADTH_COV_FRACTION):
self.assertIsNone(actual_seq,
"Expect read " + testcase.read_name + " should not be in " + ACTUAL_TEST_MERGE_FULL_MSA_FASTA)
else:
expected_written += 1
self.assertEqual(expected_seq, actual_seq,
"Expected {} but got {} for testcase {}".format(expected_seq, actual_seq, testcase.read_name))
self.assertEqual(expected_written, actual_written,
"Expect total written seq {} but got {} from {}".format(expected_written, actual_written, ACTUAL_TEST_MERGE_FULL_MSA_FASTA))
if __name__ == '__main__':
unittest.main()
|
cfe-lab/Umberjack
|
test/Test_sam_handler.py
|
Python
|
bsd-2-clause
| 22,038
|
[
"BWA"
] |
3d1d7a2db81bddaf618e2a685b6df3d1c6cbb960ceb2eefe181101b58386a2c7
|
import unittest
import numpy as np
from spglib import get_symmetry_dataset, get_hall_number_from_symmetry
from vasp import read_vasp
from os import listdir
dirnames = ('cubic',
'hexagonal',
'monoclinic',
'orthorhombic',
'tetragonal',
'triclinic',
'trigonal',
'distorted',
'virtual_structure')
class TestGetHallNumberFromSymmetry(unittest.TestCase):
def setUp(self):
self._filenames = []
for d in dirnames:
self._filenames += ["%s/%s" % (d, fname)
for fname in listdir("./data/%s" % d)]
def tearDown(self):
pass
def test_get_hall_number_from_symmetry(self):
for fname in self._filenames:
spgnum = int(fname.split('-')[1])
cell = read_vasp("./data/%s" % fname)
if 'distorted' in fname:
dataset = get_symmetry_dataset(cell, symprec=1e-1)
hall_number = get_hall_number_from_symmetry(
dataset['rotations'],
dataset['translations'],
symprec=1e-1)
if hall_number != dataset['hall_number']:
print("%d != %d in %s" %
(hall_number, dataset['hall_number'], fname))
ref_cell = (dataset['std_lattice'],
dataset['std_positions'],
dataset['std_types'])
dataset = get_symmetry_dataset(ref_cell, symprec=1e-5)
hall_number = get_hall_number_from_symmetry(
dataset['rotations'],
dataset['translations'],
symprec=1e-5)
print("Using refinced cell: %d, %d in %s" %
(hall_number, dataset['hall_number'], fname))
else:
dataset = get_symmetry_dataset(cell, symprec=1e-5)
hall_number = get_hall_number_from_symmetry(
dataset['rotations'],
dataset['translations'],
symprec=1e-5)
self.assertEqual(hall_number, dataset['hall_number'],
msg=("%d != %d in %s" %
(hall_number, dataset['hall_number'], fname)))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(
TestGetHallNumberFromSymmetry)
unittest.TextTestRunner(verbosity=2).run(suite)
# unittest.main()
|
sauliusg/cod-tools
|
src/externals/spglib/python/test/test_hall_number_from_symmetry.py
|
Python
|
gpl-2.0
| 2,575
|
[
"VASP"
] |
578382370a2f02ad49fbbf11c8d481b7b3a08be78e2600df9a90e24f0b73ae12
|
#!/usr/local/bin/python
# encoding: utf-8
"""
*Import Multi Unit Spectroscopic Explorer (MUSE) IFS galaxy stream into sherlock-catalogues database*
:Author:
David Young
"""
from __future__ import print_function
import sys
import os
os.environ['TERM'] = 'vt100'
import readline
import glob
import pickle
import codecs
import string
import requests
import re
from docopt import docopt
from astrocalc.coords import unit_conversion
from fundamentals.download import multiobject_download
from ._base_importer import _base_importer
class ifs(_base_importer):
"""
*Importer for the Multi Unit Spectroscopic Explorer (MUSE) IFS galaxy catalogue stream*
**Key Arguments**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
**Usage**
    To import the IFS catalogue stream into the sherlock-catalogues database, run the following:

    ```python
    from sherlock.imports import IFS
    stream = IFS(
        log=log,
        settings=settings
    )
    stream.ingest()
    ```
.. todo ::
- abstract this module out into its own stand alone script
- check sublime snippet exists
"""
# INITIALISATION
def ingest(self):
"""*Import the IFS catalogue into the sherlock-catalogues database*
The method first generates a list of python dictionaries from the IFS datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table.
**Usage**
See class docstring for usage
"""
self.log.debug('starting the ``get`` method')
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
self.dbTableName = "tcs_cat_ifs_stream"
self.databaseInsertbatchSize = 500
dictList = self._create_dictionary_of_IFS()
tableName = self.dbTableName
createStatement = """
CREATE TABLE `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`decDeg` double DEFAULT NULL,
`name` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`z` double DEFAULT NULL,
`htm16ID` bigint(20) DEFAULT NULL,
`htm10ID` bigint(20) DEFAULT NULL,
`htm13ID` bigint(20) DEFAULT NULL,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
PRIMARY KEY (`primaryId`),
UNIQUE KEY `radeg_decdeg` (`raDeg`,`decDeg`),
KEY `idx_htm16ID` (`htm16ID`),
KEY `idx_htm10ID` (`htm10ID`),
KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self.log.debug('completed the ``get`` method')
return None
def _create_dictionary_of_IFS(
self):
"""*Generate the list of dictionaries containing all the rows in the IFS stream*
**Return**
- ``dictList`` - a list of dictionaries containing all the rows in the IFS stream
**Usage**
```python
from sherlock.imports import IFS
stream = IFS(
log=log,
settings=settings
)
dictList = stream._create_dictionary_of_IFS()
```
"""
self.log.debug(
'starting the ``_create_dictionary_of_IFS`` method')
# GRAB THE CONTENT OF THE IFS CSV
try:
response = requests.get(
url=self.settings["ifs galaxies url"],
)
thisData = response.content
thisData = str(thisData).split("\n")
status_code = response.status_code
        except requests.exceptions.RequestException:
            print('HTTP Request failed')
            sys.exit(1)
dictList = []
columns = ["name", "raDeg", "decDeg", "z"]
for line in thisData:
thisDict = {}
line = line.strip()
line = line.replace("\t", " ")
values = line.split("|")
if len(values) > 3:
thisDict["name"] = values[0].strip()
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
try:
raDeg = converter.ra_sexegesimal_to_decimal(
ra=values[1].strip()
)
thisDict["raDeg"] = raDeg
decDeg = converter.dec_sexegesimal_to_decimal(
dec=values[2].strip()
)
thisDict["decDeg"] = decDeg
except:
name = thisDict["name"]
self.log.warning(
'Could not convert the coordinates for IFS source %(name)s. Skipping import of this source.' % locals())
continue
try:
z = float(values[3].strip())
if z > 0.:
thisDict["z"] = float(values[3].strip())
else:
thisDict["z"] = None
except:
thisDict["z"] = None
dictList.append(thisDict)
self.log.debug(
'completed the ``_create_dictionary_of_IFS`` method')
return dictList
# use the tab-trigger below for new method
# xt-class-method
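    # Illustrative input line for _create_dictionary_of_IFS (hypothetical
    # values; fields are pipe-separated: name | RA (sexegesimal) |
    # Dec (sexegesimal) | z):
    #
    #   "PTF11xyz-host | 12:34:56.7 | -12:34:56 | 0.023"
    #
    # which would yield a dict roughly like
    #   {'name': 'PTF11xyz-host', 'raDeg': 188.736..., 'decDeg': -12.582..., 'z': 0.023}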
|
thespacedoctor/sherlock
|
sherlock/imports/ifs.py
|
Python
|
mit
| 5,686
|
[
"Galaxy"
] |
e6a5e3340029b6c8e7ef48a51ebc9ee2d24915b275429116bed9a404bed3beca
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd
from espressomd import thermostat
from espressomd import interactions
import numpy
# System parameters
#############################################################
system = espressomd.System()
#if no seed is provided espresso generates a seed
system.time_step = 0.01
system.cell_system.skin = 0.4
system.box_l = [100, 100, 100]
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
system.cell_system.set_n_square(use_verlet_lists=False)
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
fene = interactions.FeneBond(k=10, d_r_max=2)
system.bonded_inter.add(fene)
poly = system.polymer
poly(N_P=1, bond_length=1.0, MPC=50, bond_id=0)
#############################################################
# Integration #
#############################################################
for i in range(20):
system.integrator.run(1000)
energies = system.analysis.energy()
print(energies)
|
lahnerml/espresso
|
samples/python/minimal-polymer.py
|
Python
|
gpl-3.0
| 1,793
|
[
"ESPResSo"
] |
cde69044723fec30e09302390f1769b020ef3ed9ac94a4eec921e2ab8194310e
|
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.2.1" # With 2to3 run on it for Python 3.6
__license__ = """
Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
Copyright 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>",
"Bernd Schlapsi <https://github.com/brot>",]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
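# e.g. on Python 3: _s2bytes('abc') == b'abc' and _l2bytes([97, 98, 99]) == b'abc'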
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import itertools
import re
import struct
import time
import types
import urllib.request, urllib.parse, urllib.error
import warnings
from html.entities import name2codepoint, codepoint2name, entitydefs
try:
    from io import BytesIO as _StringIO
except ImportError:
    from io import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
    def _xmlescape(data, entities={}):
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        for char, entity in entities.items():
            data = data.replace(char, entity)
        return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose ill-formed XML parsing and content sanitizing
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
    charref = re.compile(r'&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
'''
:return: A :class:`FeedParserDict`.
'''
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError("object doesn't have key 'category'")
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in list(link.items()) if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']=='enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']=='license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
'''
:return: A :class:`FeedParserDict`.
'''
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("object has no attribute '%s'" % key)
def __hash__(self):
return id(self)
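# Example (illustrative): the keymap above makes legacy RSS keys read-time
# aliases for their normalized names, via both item and attribute access:
#
#     fpd = FeedParserDict({'summary': 'hello'})
#     assert fpd['description'] == 'hello'  # 'description' maps to 'summary'
#     assert fpd.description == 'hello'     # __getattr__ falls back to __getitem__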
_cp1252 = {
128: chr(8364), # euro sign
130: chr(8218), # single low-9 quotation mark
131: chr( 402), # latin small letter f with hook
132: chr(8222), # double low-9 quotation mark
133: chr(8230), # horizontal ellipsis
134: chr(8224), # dagger
135: chr(8225), # double dagger
136: chr( 710), # modifier letter circumflex accent
137: chr(8240), # per mille sign
138: chr( 352), # latin capital letter s with caron
139: chr(8249), # single left-pointing angle quotation mark
140: chr( 338), # latin capital ligature oe
142: chr( 381), # latin capital letter z with caron
145: chr(8216), # left single quotation mark
146: chr(8217), # right single quotation mark
147: chr(8220), # left double quotation mark
148: chr(8221), # right double quotation mark
149: chr(8226), # bullet
150: chr(8211), # en dash
151: chr(8212), # em dash
152: chr( 732), # small tilde
153: chr(8482), # trade mark sign
154: chr( 353), # latin small letter s with caron
155: chr(8250), # single right-pointing angle quotation mark
156: chr( 339), # latin small ligature oe
158: chr( 382), # latin small letter z with caron
159: chr( 376), # latin capital letter y with diaeresis
}
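# Example: str.translate() accepts this {ordinal: replacement} mapping
# directly, so '\x93quoted\x94'.translate(_cp1252) yields the proper
# curly quotes, '\u201cquoted\u201d'.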
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
if not isinstance(uri, str):
uri = uri.decode('utf-8', 'ignore')
try:
uri = urllib.parse.urljoin(base, uri)
except ValueError:
uri = ''
if not isinstance(uri, str):
return uri.decode('utf-8', 'ignore')
return uri
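# Example: _urljoin('http://example.com/a/', 'b/c') returns
# 'http://example.com/a/b/c'; a URI with stray extra slashes after its
# scheme, e.g. 'http:////x', is first collapsed to 'http://x' by _urifixer.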
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if not self._matchnamespaces:
for k, v in list(self.namespaces.items()):
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
# georss
self.ingeometry = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
# psc_chapters_flag prevents multiple psc_chapters from being
# captured in a single entry or item. The transition states are
# None -> True -> False. psc_chapter elements will only be
# captured while it is True.
self.psc_chapters_flag = None
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
            v = v.replace('&amp;', '&')
if not isinstance(v, str):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = list(map(self._normalize_attributes, attrs))
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, str):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
if tag.find(':') != -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = chr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
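    # Example: handle_charref('160') appends the utf-8 bytes b'\xc2\xa0'
    # (a non-breaking space) to the current element; pop() later decodes
    # such byte pieces back to str.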
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = chr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
                # We have an incomplete declaration.
return k
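    # Example: for rawdata '<![CDATA[<b>]]>' and i=0, the CDATA contents
    # '<b>' are escaped to '&lt;b&gt;', handed to handle_data(), and the
    # method returns 15, the index just past ']]>'.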
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
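    # Example: mapContentType('html') -> 'text/html', and
    # mapContentType('xhtml') -> 'application/xhtml+xml'; any other value,
    # e.g. 'text/plain', is simply passed through lowercased.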
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = 'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = 'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0], _xmlescape(t[1], {'"': '&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == 'atom10' and self.contentparams.get('type', 'text') == 'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, str):
pieces[i] = v.decode('utf-8')
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
# do not resolve guid elements with isPermalink="false"
if not element == 'id' or self.guidislink:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith('atom') and self.contentparams.get('type') == 'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = 'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
if self.encoding and not isinstance(output, str):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and isinstance(output, str):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, str):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category or _end_tags or _end_itunes_keywords
if element in ('category', 'tags', 'itunes_keywords'):
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
                    output = output.replace('&amp;', '&')
                    output = re.sub(r"&([A-Za-z0-9_]+);", r"&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
                output = re.sub(r"&([A-Za-z0-9_]+);", r"&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
        if not (re.search(r'</(\w+)>', s) or re.search(r'&#?\w+;', s)):
return
# all tags must be in a restricted subset of valid HTML tags
if [t for t in re.findall(r'</?(\w+)',s) if t.lower() not in _HTMLSanitizer.acceptable_elements]:
return
# all entities must have been defined as valid HTML entities
if [e for e in re.findall(r'&(\w+);', s) if e not in list(entitydefs.keys())]:
return
return 1
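    # Example: lookslikehtml('<p>foo</p>') returns 1 (a close tag from the
    # acceptable subset), while lookslikehtml('3 < 4 and 5 > 4') returns
    # None (no close tag or entity reference), so plain text stays plain.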
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos != -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
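    # Example: _itsAnHrefDamnIt({'url': 'http://example.com/'}) returns
    # {'href': 'http://example.com/'}, folding the url/uri/href spelling
    # variants onto a single 'href' key.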
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
        # If we're here then this is an RSS feed.
        # If we don't have a version or have a version that starts with something
        # other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith('rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%ss' % key, [FeedParserDict()])[-1]
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
            author = author.replace('<>', '')
            author = author.replace('&lt;&gt;', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, detail)
if author:
detail['name'] = author
if email:
detail['email'] = email
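    # Example: a plain-text author value of 'John Doe (j@example.com)' is
    # split by the fallback branch above into
    # detail = {'name': 'John Doe', 'email': 'j@example.com'}.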
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
self.psc_chapters_flag = None
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_dcterms_valid(self, attrsD):
self.push('validity', 1)
def _end_dcterms_valid(self):
for validity_detail in self.pop('validity').split(';'):
if '=' in validity_detail:
key, value = validity_detail.split('=', 1)
if key == 'start':
self._save('validity_start', value, overwrite=True)
self._save('validity_start_parsed', _parse_date(value), overwrite=True)
elif key == 'end':
self._save('validity_end', value, overwrite=True)
self._save('validity_end_parsed', _parse_date(value), overwrite=True)
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
# geospatial location, or "where", from georss.org
def _start_georssgeom(self, attrsD):
self.push('geometry', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context['where'].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _start_where(self, attrsD):
self.push('where', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
def _start_gml_point(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 1
self.push('geometry', 0)
def _start_gml_linestring(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrsD):
self._parse_srs_attrs(attrsD)
self.push('geometry', 0)
def _start_gml_exterior(self, attrsD):
self.push('geometry', 0)
def _start_gml_linearring(self, attrsD):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrsD):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _start_gml_poslist(self, attrsD):
self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# end geospatial
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = 'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = 'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict(term=term, scheme=scheme, label=label)
if value not in tags:
tags.append(value)
def _start_tags(self, attrsD):
# This is a completely-made up element. Its semantics are determined
# only by a single feed that precipitated bug report 392 on Google Code.
# In short, this is junk code.
self.push('tags', 1)
def _end_tags(self):
for term in self.pop('tags').split(','):
self._addTag(term.strip(), None, None)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), 'http://www.itunes.com/', None)
def _end_media_keywords(self):
for term in self.pop('media_keywords').split(','):
if term.strip():
self._addTag(term.strip(), None, None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
if attrsD['rel'] == 'self':
attrsD.setdefault('type', 'application/atom+xml')
else:
attrsD.setdefault('type', 'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', list(attrsD.items()))
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
_start_media_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
_end_media_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = 'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD['url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
def _start_media_group(self, attrsD):
# don't do anything, but don't break the enclosed tags either
pass
def _start_media_rating(self, attrsD):
context = self._getContext()
context.setdefault('media_rating', attrsD)
self.push('rating', 1)
def _end_media_rating(self):
rating = self.pop('rating')
if rating is not None and rating.strip():
context = self._getContext()
context['media_rating']['content'] = rating
def _start_media_credit(self, attrsD):
context = self._getContext()
context.setdefault('media_credit', [])
context['media_credit'].append(attrsD)
self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
        if credit is not None and credit.strip():
context = self._getContext()
context['media_credit'][-1]['content'] = credit
def _start_media_restriction(self, attrsD):
context = self._getContext()
context.setdefault('media_restriction', attrsD)
self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
        if restriction is not None and restriction.strip():
context = self._getContext()
context['media_restriction']['content'] = [cc.strip().lower() for cc in restriction.split(' ')]
def _start_media_license(self, attrsD):
context = self._getContext()
context.setdefault('media_license', attrsD)
self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
        if license is not None and license.strip():
context = self._getContext()
context['media_license']['content'] = license
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
        if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
def _start_psc_chapters(self, attrsD):
if self.psc_chapters_flag is None:
# Transition from None -> True
self.psc_chapters_flag = True
attrsD['chapters'] = []
self._getContext()['psc_chapters'] = FeedParserDict(attrsD)
def _end_psc_chapters(self):
# Transition from True -> False
self.psc_chapters_flag = False
def _start_psc_chapter(self, attrsD):
if self.psc_chapters_flag:
start = self._getAttribute(attrsD, 'start')
attrsD['start_parsed'] = _parse_psc_chapter_start(start)
context = self._getContext()['psc_chapters']
context['chapters'].append(FeedParserDict(attrsD))
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in list(self.namespacesInUse.items()):
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in list(attrs.items()):
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
localname = str(localname).lower()
self.unknown_starttag(localname, list(attrsD.items()))
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in list(self.namespacesInUse.items()):
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.__code__ = sgmllib.SGMLParser.goahead.__code__
def __parse_starttag(self, i):
pass
__parse_starttag.__code__ = sgmllib.SGMLParser.parse_starttag.__code__
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + '_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, str):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = list(dict([(k.lower(), v) for k, v in attrs]).items())
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, str):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((str(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = ''.join([' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
ref = ref.lower()
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
    def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
            data = data.replace('&#x2f;', '/')
            data = data.replace('&#x2F;', '/')
        return data
    def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('audio', 'src'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('source', 'src'),
('video', 'poster'),
('video', 'src')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
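# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): _resolveRelativeURIs() rewrites the URI-bearing
# attributes listed in _RelativeURIResolver.relative_uris against a base URI.
# The markup and base URI below are hypothetical; the call passes the input
# through unchanged when the sgmllib shim is unavailable (_SGML_AVAILABLE
# is false).
def _example_resolve_relative_uris():
    html = '<a href="/about">about</a>'
    resolved = _resolveRelativeURIs(html, 'http://example.com/feed/', 'utf-8', 'text/html')
    # resolved should contain the absolute URI 'http://example.com/about'
    return resolved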
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
return _urljoin(base, rel or '')
if not base:
return rel or ''
if not rel:
try:
scheme = urllib.parse.urlparse(base)[0]
except ValueError:
return ''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return ''
uri = _urljoin(base, rel)
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return ''
return uri
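# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): _makeSafeAbsoluteURI() joins base and relative URIs but
# returns an empty string when the resulting scheme is not whitelisted. The
# expectations below assume the module's default ACCEPTABLE_URI_SCHEMES,
# which allows 'http' but not 'javascript'.
def _example_make_safe_absolute_uri():
    safe = _makeSafeAbsoluteURI('http://example.com/feed/', 'entry/1')
    # safe == 'http://example.com/feed/entry/1'
    blocked = _makeSafeAbsoluteURI('http://example.com/', 'javascript:alert(1)')
    # blocked == '' because the 'javascript' scheme is rejected
    return safe, blocked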
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set([
'annotation',
'annotation-xml',
'maction',
'maligngroup',
'malignmark',
'math',
'menclose',
'merror',
'mfenced',
'mfrac',
'mglyph',
'mi',
'mlabeledtr',
'mlongdiv',
'mmultiscripts',
'mn',
'mo',
'mover',
'mpadded',
'mphantom',
'mprescripts',
'mroot',
'mrow',
'ms',
'mscarries',
'mscarry',
'msgroup',
'msline',
'mspace',
'msqrt',
'msrow',
'mstack',
'mstyle',
'msub',
'msubsup',
'msup',
'mtable',
'mtd',
'mtext',
'mtr',
'munder',
'munderover',
'none',
'semantics',
])
mathml_attributes = set([
'accent',
'accentunder',
'actiontype',
'align',
'alignmentscope',
'altimg',
'altimg-height',
'altimg-valign',
'altimg-width',
'alttext',
'bevelled',
'charalign',
'close',
'columnalign',
'columnlines',
'columnspacing',
'columnspan',
'columnwidth',
'crossout',
'decimalpoint',
'denomalign',
'depth',
'dir',
'display',
'displaystyle',
'edge',
'encoding',
'equalcolumns',
'equalrows',
'fence',
'fontstyle',
'fontweight',
'form',
'frame',
'framespacing',
'groupalign',
'height',
'href',
'id',
'indentalign',
'indentalignfirst',
'indentalignlast',
'indentshift',
'indentshiftfirst',
'indentshiftlast',
'indenttarget',
'infixlinebreakstyle',
'largeop',
'length',
'linebreak',
'linebreakmultchar',
'linebreakstyle',
'lineleading',
'linethickness',
'location',
'longdivstyle',
'lquote',
'lspace',
'mathbackground',
'mathcolor',
'mathsize',
'mathvariant',
'maxsize',
'minlabelspacing',
'minsize',
'movablelimits',
'notation',
'numalign',
'open',
'other',
'overflow',
'position',
'rowalign',
'rowlines',
'rowspacing',
'rowspan',
'rquote',
'rspace',
'scriptlevel',
'scriptminsize',
'scriptsizemultiplier',
'selection',
'separator',
'separators',
'shift',
'side',
'src',
'stackalign',
'stretchy',
'subscriptshift',
'superscriptshift',
'symmetric',
'voffset',
'width',
'xlink:href',
'xlink:show',
'xlink:type',
'xmlns',
'xmlns:xlink',
])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if [n_v for n_v in attrs if n_v[0].startswith('xlink:')]:
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == 'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
data = data.strip().replace('\r\n', '\n')
return data
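# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): _HTMLSanitizer keeps only whitelisted elements and
# attributes, so both the <script> element and the onclick handler in this
# hypothetical snippet are dropped. Requires _SGML_AVAILABLE; otherwise the
# input passes through unsanitized.
def _example_sanitize_html():
    dirty = '<p onclick="evil()">hi<script>bad()</script></p>'
    clean = _sanitizeHTML(dirty, 'utf-8', 'text/html')
    # clean should be '<p>hi</p>' (no script element, no event handler)
    return clean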
class _FeedURLHandler(urllib.request.HTTPDigestAuthHandler, urllib.request.HTTPRedirectHandler, urllib.request.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib.request.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
        # If both conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urllib.parse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
    If request_headers is supplied, it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
:return: A :class:`StringIO.StringIO` or :class:`io.BytesIO`.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, str) \
and urllib.parse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# Test for inline user:password credentials for HTTP basic auth
auth = None
if base64 and not url_file_stream_or_string.startswith('ftp:'):
urltype, rest = urllib.parse.splittype(url_file_stream_or_string)
realhost, rest = urllib.parse.splithost(rest)
if realhost:
user_passwd, realhost = urllib.parse.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, str):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib.request.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, str):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
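# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): _open_resource() falls through the URL and file
# handling for plain document strings and returns a byte stream, so callers
# can treat every input source uniformly.
def _example_open_resource_from_string():
    stream = _open_resource('<feed/>', None, None, None, None, [], {})
    return stream.read()  # b'<feed/>'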
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urllib.parse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urllib.parse.urlunsplit(parts)
else:
return url
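# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): _convert_to_idn() punycodes only the host part and
# preserves an explicit port. The hostname below is the well-known IDN
# example domain; the expected output is an assumption based on the standard
# 'idna' codec.
def _example_convert_to_idn():
    return _convert_to_idn('http://\u4f8b\u3048.jp:8080/feed')
    # expected: 'http://xn--r8jz45g.jp:8080/feed'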
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib.request.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, str):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in list(request_headers.items()):
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
def _parse_psc_chapter_start(start):
FORMAT = r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$'
m = re.compile(FORMAT).match(start)
if m is None:
return None
_, h, m, s, _, ms = m.groups()
h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0))
return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000)
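# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): Podlove Simple Chapters start times are [hh:]mm:ss
# with optional millisecond precision; the hour group and the milliseconds
# may each be omitted, and malformed input yields None.
def _example_parse_psc_chapter_start():
    assert _parse_psc_chapter_start('01:02:03.456') == \
        datetime.timedelta(hours=1, minutes=2, seconds=3, milliseconds=456)
    assert _parse_psc_chapter_start('02:03') == datetime.timedelta(seconds=123)
    assert _parse_psc_chapter_start('bogus') is None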
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance, is 030104 the date
# 2003-01-04 or 0301-04-01?), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
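# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): the template list above lets compact ISO 8601 forms
# such as '20040105' parse the same way as '2004-01-05'. Only the date
# fields are asserted, since this handler returns a *local* time tuple.
def _example_parse_date_iso8601():
    assert _parse_date_iso8601('20040105')[:3] == (2004, 1, 5)
    assert _parse_date_iso8601('2004-01-05')[:3] == (2004, 1, 5)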
# 8-bit date handling routines written by ytrewq1.
_korean_year = '\ub144' # b3e2 in euc-kr
_korean_month = '\uc6d4' # bff9 in euc-kr
_korean_day = '\uc77c' # c0cf in euc-kr
_korean_am = '\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = '\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
'\u0399\u03b1\u03bd': 'Jan', # c9e1ed in iso-8859-7
'\u03a6\u03b5\u03b2': 'Feb', # d6e5e2 in iso-8859-7
'\u039c\u03ac\u03ce': 'Mar', # ccdcfe in iso-8859-7
'\u039c\u03b1\u03ce': 'Mar', # cce1fe in iso-8859-7
'\u0391\u03c0\u03c1': 'Apr', # c1f0f1 in iso-8859-7
'\u039c\u03ac\u03b9': 'May', # ccdce9 in iso-8859-7
'\u039c\u03b1\u03ca': 'May', # cce1fa in iso-8859-7
'\u039c\u03b1\u03b9': 'May', # cce1e9 in iso-8859-7
'\u0399\u03bf\u03cd\u03bd': 'Jun', # c9effded in iso-8859-7
'\u0399\u03bf\u03bd': 'Jun', # c9efed in iso-8859-7
'\u0399\u03bf\u03cd\u03bb': 'Jul', # c9effdeb in iso-8859-7
'\u0399\u03bf\u03bb': 'Jul', # c9f9eb in iso-8859-7
'\u0391\u03cd\u03b3': 'Aug', # c1fde3 in iso-8859-7
'\u0391\u03c5\u03b3': 'Aug', # c1f5e3 in iso-8859-7
'\u03a3\u03b5\u03c0': 'Sep', # d3e5f0 in iso-8859-7
'\u039f\u03ba\u03c4': 'Oct', # cfeaf4 in iso-8859-7
'\u039d\u03bf\u03ad': 'Nov', # cdefdd in iso-8859-7
'\u039d\u03bf\u03b5': 'Nov', # cdefe5 in iso-8859-7
'\u0394\u03b5\u03ba': 'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
'\u039a\u03c5\u03c1': 'Sun', # caf5f1 in iso-8859-7
'\u0394\u03b5\u03c5': 'Mon', # c4e5f5 in iso-8859-7
'\u03a4\u03c1\u03b9': 'Tue', # d4f1e9 in iso-8859-7
'\u03a4\u03b5\u03c4': 'Wed', # d4e5f4 in iso-8859-7
'\u03a0\u03b5\u03bc': 'Thu', # d0e5ec in iso-8859-7
'\u03a0\u03b1\u03c1': 'Fri', # d0e1f1 in iso-8859-7
'\u03a3\u03b1\u03b2': 'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile('([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
'janu\u00e1r': '01', # e1 in iso-8859-2
'febru\u00e1ri': '02', # e1 in iso-8859-2
'm\u00e1rcius': '03', # e1 in iso-8859-2
'\u00e1prilis': '04', # e1 in iso-8859-2
    'm\u00e1jus': '05', # e1 in iso-8859-2
'j\u00fanius': '06', # fa in iso-8859-2
'j\u00falius': '07', # fa in iso-8859-2
'augusztus': '08',
'szeptember': '09',
'okt\u00f3ber': '10', # f3 in iso-8859-2
'november': '11',
'december': '12',
}
_hungarian_date_format_re = \
re.compile('(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
timezonenames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
if not datestr.strip():
return None
parts = datestr.lower().split('t')
if len(parts) == 1:
# This may be a date only, or may be an MSSQL-style date
parts = parts[0].split()
if len(parts) == 1:
# Treat this as a date only
parts.append('00:00:00z')
elif len(parts) > 2:
return None
date = parts[0].split('-', 2)
if not date or len(date[0]) != 4:
return None
# Ensure that `date` has 3 elements. Using '1' sets the default
# month to January and the default day to the 1st of the month.
date.extend(['1'] * (3 - len(date)))
try:
year, month, day = [int(i) for i in date]
except ValueError:
# `date` may have more than 3 elements or may contain
# non-integer strings.
return None
if parts[1].endswith('z'):
parts[1] = parts[1][:-1]
parts.append('z')
# Append the numeric timezone offset, if any, to parts.
# If this is an MSSQL-style date then parts[2] already contains
# the timezone information, so `append()` will not affect it.
# Add 1 to each value so that if `find()` returns -1 it will be
# treated as False.
loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
loc = loc - 1
parts.append(parts[1][loc:])
parts[1] = parts[1][:loc]
time = parts[1].split(':', 2)
# Ensure that time has 3 elements. Using '0' means that the
# minutes and seconds, if missing, will default to 0.
time.extend(['0'] * (3 - len(time)))
tzhour = 0
tzmin = 0
if parts[2][:1] in ('-', '+'):
try:
tzhour = int(parts[2][1:3])
tzmin = int(parts[2][4:])
except ValueError:
return None
if parts[2].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[2], 0)
try:
hour, minute, second = [int(float(i)) for i in time]
except ValueError:
return None
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
registerDateHandler(_parse_date_w3dtf)
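# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): both the W3C-DTF 'T' separator and the MSSQL-style
# space separator are accepted, and numeric offsets are folded into the
# returned UTC 9-tuple.
def _example_parse_date_w3dtf():
    assert _parse_date_w3dtf('2003-12-31T10:14:55Z')[:6] == (2003, 12, 31, 10, 14, 55)
    # The -08:00 offset pushes this leap-year timestamp onto Feb 29 in UTC.
    assert _parse_date_w3dtf('2004-02-28T18:14:55-08:00')[:6] == (2004, 2, 29, 2, 14, 55)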
def _parse_date_rfc822(date):
"""Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed if only a date is present.
"""
daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
parts = date.lower().split()
if len(parts) < 5:
# Assume that the time and timezone are missing
parts.extend(('00:00:00', '0000'))
# Remove the day name
if parts[0][:3] in daynames:
parts = parts[1:]
if len(parts) < 5:
# If there are still fewer than five parts, there's not enough
# information to interpret this
return None
try:
day = int(parts[0])
except ValueError:
# Check if the day and month are swapped
if months.get(parts[0][:3]):
try:
day = int(parts[1])
except ValueError:
return None
else:
parts[1] = parts[0]
else:
return None
month = months.get(parts[1][:3])
if not month:
return None
try:
year = int(parts[2])
except ValueError:
return None
# Normalize two-digit years:
# Anything in the 90's is interpreted as 1990 and on
# Anything 89 or less is interpreted as 2089 or before
if len(parts[2]) <= 2:
year += (1900, 2000)[year < 90]
timeparts = parts[3].split(':')
timeparts = timeparts + ([0] * (3 - len(timeparts)))
try:
(hour, minute, second) = list(map(int, timeparts))
except ValueError:
return None
tzhour = 0
tzmin = 0
# Strip 'Etc/' from the timezone
if parts[4].startswith('etc/'):
parts[4] = parts[4][4:]
# Normalize timezones that start with 'gmt':
# GMT-05:00 => -0500
# GMT => GMT
if parts[4].startswith('gmt'):
parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
# Handle timezones like '-0500', '+0500', and 'EST'
if parts[4] and parts[4][0] in ('-', '+'):
try:
tzhour = int(parts[4][1:3])
tzmin = int(parts[4][3:])
except ValueError:
return None
if parts[4].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[4], 0)
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
registerDateHandler(_parse_date_rfc822)
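# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): the extensions documented in the docstring above in
# action. A two-digit year is widened and a missing time defaults to
# midnight GMT.
def _example_parse_date_rfc822():
    assert _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')[:6] == (2004, 1, 1, 19, 48, 21)
    assert _parse_date_rfc822('31 Dec 99')[:6] == (1999, 12, 31, 0, 0, 0)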
_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def _parse_date_asctime(dt):
"""Parse asctime-style dates.
Converts asctime to RFC822-compatible dates and uses the RFC822 parser
to do the actual parsing.
Supported formats (format is standardized to the first one listed):
* {weekday name} {month name} dd hh:mm:ss {+-tz} yyyy
* {weekday name} {month name} dd hh:mm:ss yyyy
"""
parts = dt.split()
# Insert a GMT timezone, if needed.
if len(parts) == 5:
parts.insert(4, '+0000')
# Exit if there are not six parts.
if len(parts) != 6:
return None
# Reassemble the parts in an RFC822-compatible order and parse them.
return _parse_date_rfc822(' '.join([
parts[0], parts[2], parts[1], parts[5], parts[3], parts[4],
]))
registerDateHandler(_parse_date_asctime)
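# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): asctime() output carries no timezone, so '+0000' is
# inserted before the parts are reordered into an RFC 822 date.
def _example_parse_date_asctime():
    assert _parse_date_asctime('Sun Jan  4 16:29:06 2004')[:6] == (2004, 1, 4, 16, 29, 6)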
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
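# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): _parse_date() simply walks the registered handlers
# (most recently registered first) until one returns a 9-tuple, so all of
# the formats above share a single entry point.
def _example_parse_date_dispatch():
    assert _parse_date('2003-12-31T10:14:55Z')[:3] == (2003, 12, 31)        # W3C-DTF
    assert _parse_date('Thu, 01 Jan 2004 19:48:21 GMT')[:3] == (2004, 1, 1) # RFC 822
    assert _parse_date('') is None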
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
bom_encoding = ''
xml_encoding = ''
rfc3023_encoding = ''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = 'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = 'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = 'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = 'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = 'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = 'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = 'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = 'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = 'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = 'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
'u16', 'utf-16', 'utf16', 'utf_16',
'u32', 'utf-32', 'utf32', 'utf_32',
'iso-10646-ucs-2', 'iso-10646-ucs-4',
'csucs4', 'csunicode', 'ucs-2', 'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if not isinstance(http_encoding, str):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd',
'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and
http_content_type.endswith('+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/') and
http_content_type.endswith('+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
rfc3023_encoding = http_encoding or 'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or 'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or 'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == 'gb2312':
rfc3023_encoding = 'gb18030'
if xml_encoding.lower() == 'gb2312':
xml_encoding = 'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
lazy_chardet_encoding = None
tried_encodings = []
if chardet:
def lazy_chardet_encoding():
chardet_encoding = chardet.detect(data)['encoding']
if not chardet_encoding:
chardet_encoding = ''
if not isinstance(chardet_encoding, str):
chardet_encoding = str(chardet_encoding, 'ascii', 'ignore')
return chardet_encoding
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
lazy_chardet_encoding, 'utf-8', 'windows-1252', 'iso-8859-2'):
if callable(proposed_encoding):
proposed_encoding = proposed_encoding()
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + '\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = ''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
return data, rfc3023_encoding, error
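# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): with an XML media type, the charset parameter wins
# under RFC 3023, and the returned document is re-encoded as UTF-8 with a
# fresh XML declaration.
def _example_convert_to_utf8():
    headers = {'content-type': 'application/xml; charset=iso-8859-1'}
    data, encoding, error = convert_to_utf8(headers, b"<?xml version='1.0'?><feed/>")
    assert encoding == 'iso-8859-1' and error is None
    assert data.startswith(b"<?xml version='1.0' encoding='utf-8'?>")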
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
'''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
'''
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if _s2bytes('netscape') in doctype.lower():
version = 'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = _s2bytes('')
if len(doctype_results) == 1 and entity_results:
match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
safe_entities = list(filter(match_safe_entities, entity_results))
if safe_entities:
replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
+ _s2bytes('>\n<!ENTITY ').join(safe_entities) \
+ _s2bytes('>\n]>')
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
return version, data, safe_entities
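# Illustrative usage sketch (added commentary, not part of the original
# feedparser source): a Netscape RSS 0.91 DOCTYPE is detected and stripped.
# The hypothetical document below keeps its root element but loses the
# declaration, and no safe entities survive because none were declared.
def _example_replace_doctype():
    doc = b'<!DOCTYPE rss SYSTEM "http://my.netscape.com/publish/formats/rss-0.91.dtd"><rss version="0.91"/>'
    version, data, safe_entities = replace_doctype(doc)
    assert version == 'rss091n'
    assert data == b'<rss version="0.91"/>' and safe_entities == {}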
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
    if geom_type == 'linestring':
        return _parse_georss_line(value, swap, dims)
    elif geom_type == 'polygon':
        ring = _parse_georss_line(value, swap, dims)
        if ring is None:
            # Propagate parsing errors as None, as documented above.
            return None
        return {'type': 'Polygon', 'coordinates': (ring['coordinates'],)}
    else:
        return None
def _gen_georss_coords(value, swap=True, dims=2):
    # A generator of (lon, lat) pairs from a string of encoded GeoRSS
    # coordinates. Converts to floats and swaps order.
    latlons = map(float, value.strip().replace(',', ' ').split())
    while True:
        try:
            t = [next(latlons), next(latlons)][::swap and -1 or 1]
            if dims == 3:
                t.append(next(latlons))
        except StopIteration:
            # PEP 479: a StopIteration must not escape a generator body.
            return
        yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Point', 'coordinates': coords[0]}
except (IndexError, ValueError):
return None
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'LineString', 'coordinates': coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {'type': 'Polygon', 'coordinates': (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
    # A bounding box is a rectangular region, often used to define the extents
    # of a map or a rough area of interest. A box contains two space-separated
    # latitude-longitude pairs, with each pair separated by whitespace. The
    # first pair is the lower corner, the second is the upper corner.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Box', 'coordinates': tuple(coords)}
except (IndexError, ValueError):
return None
# end geospatial parsers
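# --- Editor's illustrative sketch (not part of feedparser) ---
# GeoRSS encodes "lat lon" pairs; with swap=True (the default) the parsers
# above emit GeoJSON-style (lon, lat) tuples. The coordinates are made up.
def _demo_georss_parsers():
    point = _parse_georss_point('45.256 -71.92')
    # Expected: {'type': 'Point', 'coordinates': (-71.92, 45.256)}
    box = _parse_georss_box('42.943 -71.032 43.039 -69.856')
    # Expected: a 'Box' whose coordinates are the lower and upper corners.
    return point, box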
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
:return: A :class:`FeedParserDict`.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception as e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in list(result['headers'].items()))
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error) as e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error as e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error as e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', '')
if not isinstance(etag, str):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', '')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, str):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
data, result['encoding'], error = convert_to_utf8(http_headers, data)
    use_strict_parser = bool(result['encoding'])
if error is not None:
result['bozo'] = 1
result['bozo_exception'] = error
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = http_headers.get('content-location', '')
href = result.get('href', '')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, str) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException as e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
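# --- Editor's illustrative usage sketch (not part of feedparser) ---
# A typical conditional-fetch pattern for parse() above; the URL is
# hypothetical, and etag/modified are carried over from a previous result.
def _demo_conditional_fetch(url='http://example.org/feed.xml', previous=None):
    kwargs = {}
    if previous is not None:
        kwargs['etag'] = previous.get('etag')
        kwargs['modified'] = previous.get('modified')
    result = parse(url, **kwargs)
    if result.get('status') == 304:
        return previous  # server says nothing changed since the last fetch
    if result['bozo']:
        pass  # feed was ill-formed; entries may still be partially usable
    return result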
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
|
flavour/eden
|
modules/feedparser5213.py
|
Python
|
mit
| 159,859
|
[
"NetCDF",
"VisIt"
] |
e8d336a0233778a5974036fa60d87cc8408e05ae93abcde0fdbbba78153a60bb
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions for Psi4/Cfour interface. Portions that require
calls to Boost Python psi4 module are here, otherwise in qcdb module.
Also calls to qcdb module are here and not elsewhere in driver.
Organizationally, this module isolates qcdb code from psi4 code.
"""
import os
import re
import sys
import uuid
import shutil
import inspect
import subprocess
from psi4.driver import qcdb
from psi4.driver import p4util
from psi4.driver.molutil import *
from psi4.driver.p4util.exceptions import *
# never import driver, wrappers, or aliases into this file
P4C4_INFO = {}
def run_cfour(name, **kwargs):
"""Function that prepares environment and input files
for a calculation calling Stanton and Gauss's CFOUR code.
Also processes results back into Psi4 format.
This function is not called directly but is instead called by
:py:func:`~psi4.energy` or :py:func:`~psi4.optimize` when a Cfour
method is requested (through *name* argument). In order to function
correctly, the Cfour executable ``xcfour`` must be present in
:envvar:`PATH` or :envvar:`PSIPATH`.
.. hlist::
:columns: 1
* Many :ref:`PSI Variables <apdx:cfour_psivar>` extracted from the Cfour output
* Python dictionary of associated file constants accessible as ``P4C4_INFO['zmat']``, ``P4C4_INFO['output']``, ``P4C4_INFO['grd']``, *etc.*
:type name: str
:param name: ``'c4-scf'`` || ``'c4-ccsd(t)'`` || ``'cfour'`` || etc.
First argument, usually unlabeled. Indicates the computational
method to be applied to the system.
:type keep: :ref:`boolean <op_py_boolean>`
:param keep: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether to delete the Cfour scratch directory upon
completion of the Cfour job.
:type path: str
:param path:
Indicates path to Cfour scratch directory (with respect to Psi4
scratch directory). Otherwise, the default is a subdirectory
within the Psi4 scratch directory.
If specified, GENBAS and/or ZMAT within will be used.
:type genbas: str
:param genbas:
Indicates that contents should be used for GENBAS file.
GENBAS is a complicated topic. It is quite unnecessary if the
molecule is from a molecule {...} block and basis is set through
|Psifours| BASIS keyword. In that case, a GENBAS is written from
LibMints and all is well. Otherwise, a GENBAS is looked for in
the usual places: PSIPATH, PATH, PSIDATADIR/basis. If path kwarg is
specified, also looks there preferentially for a GENBAS. Can
also specify GENBAS within an input file through a string and
setting the genbas kwarg. Note that due to the input parser's
aggression, blank lines need to be replaced by the text blankline.
"""
lowername = name.lower()
internal_p4c4_info = {}
return_wfn = kwargs.pop('return_wfn', False)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
optstash = p4util.OptionsState(
['CFOUR', 'TRANSLATE_PSI4'])
# Determine calling function and hence dertype
calledby = inspect.stack()[1][3]
dertype = ['energy', 'gradient', 'hessian'].index(calledby)
#print('I am %s called by %s called by %s.\n' %
# (inspect.stack()[0][3], inspect.stack()[1][3], inspect.stack()[2][3]))
# Save submission directory
current_directory = os.getcwd()
# Move into job scratch directory
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
os.chdir(psioh.get_default_path())
# Construct and move into cfour subdirectory of job scratch directory
cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
'.cfour.' + str(uuid.uuid4())[:8]
if not os.path.exists(cfour_tmpdir):
os.mkdir(cfour_tmpdir)
os.chdir(cfour_tmpdir)
# Find environment by merging PSIPATH and PATH environment variables
lenv = {
'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
':' + os.environ.get('PATH') + \
':' + core.get_datadir() + '/basis',
'GENBAS_PATH': core.get_datadir() + '/basis',
'CFOUR_NUM_CORES': os.environ.get('CFOUR_NUM_CORES'),
'MKL_NUM_THREADS': os.environ.get('MKL_NUM_THREADS'),
'OMP_NUM_THREADS': os.environ.get('OMP_NUM_THREADS'),
'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
}
if 'path' in kwargs:
lenv['PATH'] = kwargs['path'] + ':' + lenv['PATH']
# Filter out None values as subprocess will fault on them
lenv = {k: v for k, v in lenv.items() if v is not None}
# Load the GENBAS file
genbas_path = qcdb.search_file('GENBAS', lenv['GENBAS_PATH'])
if genbas_path:
try:
shutil.copy2(genbas_path, psioh.get_default_path() + cfour_tmpdir)
except shutil.Error: # should only fail if src and dest equivalent
pass
core.print_out("\n GENBAS loaded from %s\n" % (genbas_path))
core.print_out(" CFOUR to be run from %s\n" % (psioh.get_default_path() + cfour_tmpdir))
else:
message = """
GENBAS file for CFOUR interface not found. Either:
[1] Supply a GENBAS by placing it in PATH or PSIPATH
[1a] Use cfour {} block with molecule and basis directives.
[1b] Use molecule {} block and CFOUR_BASIS keyword.
[2] Allow Psi4's internal basis sets to convert to GENBAS
[2a] Use molecule {} block and BASIS keyword.
"""
core.print_out(message)
core.print_out(' Search path that was tried:\n')
core.print_out(lenv['PATH'].replace(':', ', '))
# Generate the ZMAT input file in scratch
if 'path' in kwargs and os.path.isfile('ZMAT'):
core.print_out(" ZMAT loaded from %s\n" % (psioh.get_default_path() + kwargs['path'] + '/ZMAT'))
else:
with open('ZMAT', 'w') as cfour_infile:
cfour_infile.write(write_zmat(lowername, dertype, molecule))
    with open('ZMAT', 'r') as cfour_infile:
        internal_p4c4_info['zmat'] = cfour_infile.read()
#core.print_out('\n====== Begin ZMAT input for CFOUR ======\n')
#core.print_out(open('ZMAT', 'r').read())
#core.print_out('======= End ZMAT input for CFOUR =======\n\n')
#print('\n====== Begin ZMAT input for CFOUR ======')
#print(open('ZMAT', 'r').read())
#print('======= End ZMAT input for CFOUR =======\n')
if 'genbas' in kwargs:
with open('GENBAS', 'w') as cfour_basfile:
cfour_basfile.write(kwargs['genbas'].replace('\nblankline\n', '\n\n'))
core.print_out(' GENBAS loaded from kwargs string\n')
# Close psi4 output file and reopen with filehandle
print('output in', current_directory + '/' + core.outfile_name())
pathfill = '' if os.path.isabs(core.outfile_name()) else current_directory + os.path.sep
# Handle threading
# OMP_NUM_THREADS from env is in lenv from above
# threads from psi4 -n (core.get_num_threads()) is ignored
# CFOUR_OMP_NUM_THREADS psi4 option takes precedence, handled below
    if core.has_option_changed('CFOUR', 'CFOUR_OMP_NUM_THREADS'):
lenv['OMP_NUM_THREADS'] = str(core.get_option('CFOUR', 'CFOUR_OMP_NUM_THREADS'))
#print("""\n\n<<<<< RUNNING CFOUR ... >>>>>\n\n""")
# Call executable xcfour, directing cfour output to the psi4 output file
cfour_executable = kwargs['c4exec'] if 'c4exec' in kwargs else 'xcfour'
try:
retcode = subprocess.Popen([cfour_executable], bufsize=0, stdout=subprocess.PIPE, env=lenv)
except OSError as e:
sys.stderr.write('Program %s not found in path or execution failed: %s\n' % (cfour_executable, e.strerror))
message = ('Program %s not found in path or execution failed: %s\n' % (cfour_executable, e.strerror))
raise ValidationError(message)
c4out = ''
while True:
data = retcode.stdout.readline()
data = data.decode('utf-8')
if not data:
break
core.print_out(data)
c4out += data
internal_p4c4_info['output'] = c4out
c4files = {}
core.print_out('\n')
for item in ['GRD', 'FCMFINAL', 'DIPOL']:
try:
with open(psioh.get_default_path() + cfour_tmpdir + '/' + item, 'r') as handle:
c4files[item] = handle.read()
core.print_out(' CFOUR scratch file %s has been read\n' % (item))
core.print_out('%s\n' % c4files[item])
internal_p4c4_info[item.lower()] = c4files[item]
except IOError:
pass
core.print_out('\n')
if molecule.name() == 'blank_molecule_psi4_yo':
qcdbmolecule = None
else:
molecule.update_geometry()
qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
qcdbmolecule.update_geometry()
# c4mol, if it exists, is dinky, just a clue to geometry of cfour results
psivar, c4grad, c4mol = qcdb.cfour.harvest(qcdbmolecule, c4out, **c4files)
# Absorb results into psi4 data structures
for key in psivar.keys():
core.set_variable(key.upper(), float(psivar[key]))
if qcdbmolecule is None and c4mol is not None:
molecule = geometry(c4mol.create_psi4_string_from_molecule(), name='blank_molecule_psi4_yo')
molecule.update_geometry()
# This case arises when no Molecule going into calc (cfour {} block) but want
# to know the orientation at which grad, properties, etc. are returned (c4mol).
# c4mol is dinky, w/o chg, mult, dummies and retains name
# blank_molecule_psi4_yo so as to not interfere with future cfour {} blocks
if c4grad is not None:
mat = core.Matrix.from_list(c4grad)
core.set_gradient(mat)
#print ' <<< [3] C4-GRD-GRAD >>>'
#mat.print()
# exit(1)
# # Things needed core.so module to do
# collect c4out string
# read GRD
# read FCMFINAL
# see if theres an active molecule
# # Things delegatable to qcdb
# parsing c4out
# reading GRD and FCMFINAL strings
# reconciling p4 and c4 molecules (orient)
# reconciling c4out and GRD and FCMFINAL results
# transforming frame of results back to p4
# # Things run_cfour needs to have back
# psivar
# qcdb.Molecule of c4?
# coordinates?
# gradient in p4 frame
# # Process the cfour output
# psivar, c4coord, c4grad = qcdb.cfour.cfour_harvest(c4out)
# for key in psivar.keys():
# core.set_variable(key.upper(), float(psivar[key]))
#
# # Awful Hack - Go Away TODO
# if c4grad:
# molecule = core.get_active_molecule()
# molecule.update_geometry()
#
# if molecule.name() == 'blank_molecule_psi4_yo':
# p4grad = c4grad
# p4coord = c4coord
# else:
# qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
# #p4grad = qcdbmolecule.deorient_array_from_cfour(c4coord, c4grad)
# #p4coord = qcdbmolecule.deorient_array_from_cfour(c4coord, c4coord)
#
# with open(psioh.get_default_path() + cfour_tmpdir + '/GRD', 'r') as cfour_grdfile:
# c4outgrd = cfour_grdfile.read()
# print('GRD\n',c4outgrd)
# c4coordGRD, c4gradGRD = qcdb.cfour.cfour_harvest_files(qcdbmolecule, grd=c4outgrd)
#
# p4mat = core.Matrix.from_list(p4grad)
# core.set_gradient(p4mat)
# print(' <<< P4 PSIVAR >>>')
# for item in psivar:
# print(' %30s %16.8f' % (item, psivar[item]))
#print(' <<< P4 COORD >>>')
#for item in p4coord:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# print(' <<< P4 GRAD >>>')
# for item in c4grad:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# Clean up cfour scratch directory unless user instructs otherwise
keep = yes.match(str(kwargs['keep'])) if 'keep' in kwargs else False
os.chdir('..')
try:
if keep or ('path' in kwargs):
core.print_out('\n CFOUR scratch files have been kept in %s\n' % (psioh.get_default_path() + cfour_tmpdir))
else:
shutil.rmtree(cfour_tmpdir)
except OSError as e:
print('Unable to remove CFOUR temporary directory %s' % e, file=sys.stderr)
exit(1)
# Return to submission directory and reopen output file
os.chdir(current_directory)
core.print_out('\n')
p4util.banner(' Cfour %s %s Results ' % (name.lower(), calledby.capitalize()))
core.print_variables()
if c4grad is not None:
core.get_gradient().print_out()
# Quit if Cfour threw error
if 'CFOUR ERROR CODE' in core.variables():
raise ValidationError("""Cfour exited abnormally.""")
P4C4_INFO.clear()
P4C4_INFO.update(internal_p4c4_info)
optstash.restore()
# new skeleton wavefunction w/mol, highest-SCF basis (just to choose one), & not energy
# Feb 2017 hack. Could get proper basis in skel wfn even if not through p4 basis kw
gobas = core.get_global_option('BASIS') if core.get_global_option('BASIS') else 'sto-3g'
basis = core.BasisSet.build(molecule, "ORBITAL", gobas)
if basis.has_ECP():
raise ValidationError("""ECPs not hooked up for Cfour""")
wfn = core.Wavefunction(molecule, basis)
optstash.restore()
if dertype == 0:
finalquantity = psivar['CURRENT ENERGY']
elif dertype == 1:
finalquantity = core.get_gradient()
wfn.set_gradient(finalquantity)
if finalquantity.rows(0) < 20:
core.print_out('CURRENT GRADIENT')
finalquantity.print_out()
elif dertype == 2:
pass
#finalquantity = finalhessian
#wfn.set_hessian(finalquantity)
#if finalquantity.rows(0) < 20:
# core.print_out('CURRENT HESSIAN')
# finalquantity.print_out()
return wfn
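# --- Editor's illustrative usage sketch (not part of Psi4) ---
# run_cfour() is normally reached through psi4.energy()/psi4.optimize() with a
# 'c4-'-prefixed method name rather than called directly. A hypothetical
# session (requires a working Psi4 install and `xcfour` on PATH):
def _demo_run_cfour_via_driver():
    import psi4
    psi4.geometry("""
    O
    H 1 0.96
    H 1 0.96 2 104.5
    """)
    psi4.set_options({'basis': 'cc-pvdz'})
    # keep=True retains the Cfour scratch directory, per the docstring above.
    return psi4.energy('c4-scf', keep=True)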
def cfour_list():
"""Form list of Cfour :py:func:`~driver.energy` arguments."""
return qcdb.cfour.cfour_list()
def cfour_gradient_list():
"""Form list of Cfour analytic :py:func:`~driver.gradient` arguments."""
return qcdb.cfour.cfour_gradient_list()
def cfour_psivar_list():
"""Form dictionary of :ref:`PSI Variables <apdx:cfour_psivar>` set by Cfour methods."""
return qcdb.cfour.cfour_psivar_list()
def write_zmat(name, dertype, molecule):
"""Returns string with contents of Cfour ZMAT file as gathered from
active molecule, current keyword settings, and cfour {...} block.
"""
# Handle memory
mem = int(0.000001 * core.get_memory())
if mem == 524:
memcmd, memkw = '', {}
else:
memcmd, memkw = qcdb.cfour.muster_memory(mem)
# Handle molecule and basis set
if molecule.name() == 'blank_molecule_psi4_yo':
molcmd, molkw = '', {}
bascmd, baskw = '', {}
core.set_local_option('CFOUR', 'TRANSLATE_PSI4', False)
else:
molecule.update_geometry()
#print(molecule.create_psi4_string_from_molecule())
qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
qcdbmolecule.tagline = molecule.name()
molcmd, molkw = qcdbmolecule.format_molecule_for_cfour()
if core.get_global_option('BASIS') == '':
bascmd, baskw = '', {}
else:
user_pg = molecule.schoenflies_symbol()
molecule.reset_point_group('c1') # need basis printed for *every* atom
qbs = core.BasisSet.build(molecule, "BASIS", core.get_global_option('BASIS'))
if qbs.has_ECP():
raise ValidationError("""ECPs not hooked up for Cfour""")
with open('GENBAS', 'w') as cfour_basfile:
cfour_basfile.write(qbs.genbas())
core.print_out(' GENBAS loaded from Psi4 LibMints for basis %s\n' % (core.get_global_option('BASIS')))
molecule.reset_point_group(user_pg)
molecule.update_geometry()
bascmd, baskw = qcdbmolecule.format_basis_for_cfour(qbs.has_puream())
# Handle psi4 keywords implying cfour keyword values
if core.get_option('CFOUR', 'TRANSLATE_PSI4'):
psicmd, psikw = qcdb.cfour.muster_psi4options(p4util.prepare_options_for_modules(changedOnly=True))
else:
psicmd, psikw = '', {}
# Handle calc type and quantum chemical method
mdccmd, mdckw = qcdb.cfour.muster_modelchem(name, dertype)
# Handle driver vs input/default keyword reconciliation
userkw = p4util.prepare_options_for_modules()
userkw = qcdb.options.reconcile_options(userkw, memkw)
userkw = qcdb.options.reconcile_options(userkw, molkw)
userkw = qcdb.options.reconcile_options(userkw, baskw)
userkw = qcdb.options.reconcile_options(userkw, psikw)
userkw = qcdb.options.reconcile_options(userkw, mdckw)
# Handle conversion of psi4 keyword structure into cfour format
optcmd = qcdb.options.prepare_options_for_cfour(userkw)
# Handle text to be passed untouched to cfour
litcmd = core.get_global_option('LITERAL_CFOUR')
# Assemble ZMAT pieces
zmat = memcmd + molcmd + optcmd + mdccmd + psicmd + bascmd + litcmd
if len(re.findall(r'^\*(ACES2|CFOUR|CRAPS)\(', zmat, re.MULTILINE)) != 1:
core.print_out('\n Faulty ZMAT constructed:\n%s' % (zmat))
raise ValidationError("""
Multiple *CFOUR(...) blocks in input. This usually arises
because molecule or options are specified both the psi4 way through
molecule {...} and set ... and the cfour way through cfour {...}.""")
return zmat
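# --- Editor's illustrative sketch (not part of Psi4) ---
# The sanity check at the end of write_zmat() requires exactly one
# *CFOUR(...) keyword block in the assembled ZMAT; the sample text is made up.
def _demo_zmat_block_check():
    sample = 'water\nO\nH 1 R\nH 1 R 2 A\n\n*CFOUR(CALC=SCF,BASIS=PVDZ)\n\n'
    return len(re.findall(r'^\*(ACES2|CFOUR|CRAPS)\(', sample, re.MULTILINE)) == 1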
|
lothian/psi4
|
psi4/driver/procrouting/interface_cfour.py
|
Python
|
lgpl-3.0
| 18,937
|
[
"CFOUR",
"Psi4"
] |
9c57664b4e20d410611d717d73824f6f6425ab90a6d44f05016d075126f84c06
|
########################################################################
# File: Operation.py
# Date: 2012/07/24 12:12:05
########################################################################
"""
:mod: Operation
.. module: Operation
:synopsis: Operation implementation
Operation implementation
"""
# Disable invalid names warning
# pylint: disable=invalid-name
__RCSID__ = "$Id$"
import datetime
from types import StringTypes
import json
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.JSONUtils import RMSEncoder
########################################################################
class Operation( object ):
"""
:param long OperationID: OperationID as read from DB backend
:param long RequestID: parent RequestID
:param str Status: execution status
:param str Type: operation to perform
:param str Arguments: additional arguments
:param str SourceSE: source SE name
:param str TargetSE: target SE names as comma separated list
:param str Catalog: catalog to use as comma separated list
:param str Error: error string if any
:param Request.Request parent: parent Request instance
  This class is managed by SQLAlchemy, so RequestID and OperationID should never be
  set by hand (except when constructing an Operation from JSON, of course...).
  In principle, the _parent attribute could be managed entirely by SQLAlchemy.
  However, SQLAlchemy sets it only when the object is inserted into the DB, which is
  why it is also set manually in Request._notify.
  """
# # max files in a single operation
MAX_FILES = 10000
# # all states
ALL_STATES = ( "Queued", "Waiting", "Scheduled", "Assigned", "Failed", "Done", "Canceled" )
# # final states
FINAL_STATES = ( "Failed", "Done", "Canceled" )
# # valid attributes
ATTRIBUTE_NAMES = ['OperationID', 'RequestID', "Type", "Status", "Arguments",
"Order", "SourceSE", "TargetSE", "Catalog", "Error",
"CreationTime", "SubmitTime", "LastUpdate"]
_datetimeFormat = '%Y-%m-%d %H:%M:%S'
def __init__( self, fromDict = None ):
""" c'tor
:param self: self reference
:param dict fromDict: attributes dictionary
"""
self._parent = None
now = datetime.datetime.utcnow().replace( microsecond = 0 )
self._SubmitTime = now
self._LastUpdate = now
self._CreationTime = now
self._Status = "Queued"
self._Order = 0
self.__files__ = []
self.TargetSE = None
self.SourceSE = None
self.Arguments = None
self.Error = None
self.Type = None
self._Catalog = None
fromDict = fromDict if isinstance( fromDict, dict )\
else json.loads( fromDict ) if isinstance( fromDict, StringTypes )\
else {}
if "Files" in fromDict:
for fileDict in fromDict.get( "Files", [] ):
self.addFile( File( fileDict ) )
del fromDict["Files"]
for key, value in fromDict.items():
# The JSON module forces the use of UTF-8, which is not properly
# taken into account in DIRAC.
# One would need to replace all the '== str' with 'in StringTypes'
if type( value ) in StringTypes:
value = value.encode()
if value:
setattr( self, key, value )
# # protected methods for parent only
def _notify( self ):
""" notify self about file status change """
fStatus = set( self.fileStatusList() )
if fStatus == set( ['Failed'] ):
# All files Failed -> Failed
newStatus = 'Failed'
elif 'Scheduled' in fStatus:
newStatus = 'Scheduled'
elif "Waiting" in fStatus:
newStatus = 'Queued'
elif 'Failed' in fStatus:
newStatus = 'Failed'
else:
self.Error = ''
newStatus = 'Done'
    # If the status moved to Failed, Done or Scheduled, update the LastUpdate time
if newStatus in ('Failed', 'Done', 'Scheduled'):
if self._Status != newStatus:
self._LastUpdate = datetime.datetime.utcnow().replace( microsecond = 0 )
self._Status = newStatus
if self._parent:
self._parent._notify()
def _setQueued( self, caller ):
""" don't touch """
if caller == self._parent:
self._Status = "Queued"
def _setWaiting( self, caller ):
""" don't touch as well """
if caller == self._parent:
self._Status = "Waiting"
# # Files arithmetics
def __contains__( self, opFile ):
""" in operator """
return opFile in self.__files__
def __iadd__( self, opFile ):
""" += operator """
if len( self ) >= Operation.MAX_FILES:
raise RuntimeError( "too many Files in a single Operation" )
self.addFile( opFile )
return self
def addFile( self, opFile ):
""" add :opFile: to operation
.. warning::
You cannot add a File object that has already been added to another operation. They must be different objects
"""
if len( self ) >= Operation.MAX_FILES:
raise RuntimeError( "too many Files in a single Operation" )
if opFile not in self:
self.__files__.append( opFile )
opFile._parent = self
self._notify()
# # helpers for looping
def __iter__( self ):
""" files iterator """
return self.__files__.__iter__()
def __getitem__( self, i ):
""" [] op for opFiles """
return self.__files__.__getitem__( i )
def __delitem__( self, i ):
""" remove file from op, only if OperationID is NOT set """
self.__files__.__delitem__( i )
self._notify()
def __setitem__( self, i, opFile ):
""" overwrite opFile """
self.__files__.__setitem__( i, opFile )
opFile._parent = self
self._notify()
def fileStatusList( self ):
""" get list of files statuses """
return [ subFile.Status for subFile in self ]
def __nonzero__( self ):
""" for comparisons
"""
return True
def __len__( self ):
""" nb of subFiles """
return len( self.__files__ )
@property
def sourceSEList( self ):
""" helper property returning source SEs as a list"""
return self.SourceSE.split( "," ) if self.SourceSE else ['']
@property
def targetSEList( self ):
""" helper property returning target SEs as a list"""
return self.TargetSE.split( "," ) if self.TargetSE else ['']
@property
def Catalog( self ):
""" catalog prop """
return self._Catalog
@Catalog.setter
def Catalog( self, value ):
""" catalog setter """
if type( value ) not in ( str, unicode, list ):
raise TypeError( "wrong type for value" )
if type( value ) in ( str, unicode ):
value = value.split( ',' )
value = ",".join( list ( set ( [ str( item ).strip() for item in value if str( item ).strip() ] ) ) )
if len( value ) > 255:
raise ValueError( "Catalog list too long" )
self._Catalog = value.encode() if value else ""
@property
def catalogList( self ):
""" helper property returning catalogs as list """
return self._Catalog.split( "," ) if self._Catalog else []
@property
def Status( self ):
""" Status prop """
return self._Status
@Status.setter
def Status( self, value ):
""" Status setter """
if value not in Operation.ALL_STATES:
raise ValueError( "unknown Status '%s'" % str( value ) )
if self.__files__:
self._notify()
else:
# If the status moved to Failed or Done, update the lastUpdate time
if value in ( 'Failed', 'Done' ):
if self._Status != value:
self._LastUpdate = datetime.datetime.utcnow().replace( microsecond = 0 )
self._Status = value
if self._parent:
self._parent._notify()
if self._Status == 'Done':
self.Error = ''
@property
def Order( self ):
""" order prop """
if self._parent:
self._Order = self._parent.indexOf( self ) if self._parent else -1
return self._Order
@Order.setter
def Order( self, value ):
""" order prop """
self._Order = value
@property
def CreationTime( self ):
""" operation creation time prop """
return self._CreationTime
@CreationTime.setter
def CreationTime( self, value = None ):
""" creation time setter """
if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ):
raise TypeError( "CreationTime should be a datetime.datetime!" )
if type( value ) in StringTypes:
value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
self._CreationTime = value
@property
def SubmitTime( self ):
""" subrequest's submit time prop """
return self._SubmitTime
@SubmitTime.setter
def SubmitTime( self, value = None ):
""" submit time setter """
if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ):
raise TypeError( "SubmitTime should be a datetime.datetime!" )
if type( value ) in StringTypes:
value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
self._SubmitTime = value
@property
def LastUpdate( self ):
""" last update prop """
return self._LastUpdate
@LastUpdate.setter
def LastUpdate( self, value = None ):
""" last update setter """
if type( value ) not in ( [datetime.datetime] + list( StringTypes ) ):
raise TypeError( "LastUpdate should be a datetime.datetime!" )
if type( value ) in StringTypes:
value = datetime.datetime.strptime( value.split( "." )[0], self._datetimeFormat )
self._LastUpdate = value
if self._parent:
self._parent.LastUpdate = value
def __str__( self ):
""" str operator """
return self.toJSON()['Value']
def toJSON( self ):
""" Returns the JSON description string of the Operation """
try:
jsonStr = json.dumps( self, cls = RMSEncoder )
return S_OK( jsonStr )
except Exception as e:
return S_ERROR( str( e ) )
def _getJSONData( self ):
""" Returns the data that have to be serialized by JSON """
jsonData = {}
for attrName in Operation.ATTRIBUTE_NAMES:
# RequestID and OperationID might not be set since they are managed by SQLAlchemy
if not hasattr( self, attrName ):
continue
value = getattr( self, attrName )
if isinstance( value, datetime.datetime ):
# We convert date time to a string
jsonData[attrName] = value.strftime( self._datetimeFormat )
else:
jsonData[attrName] = value
jsonData['Files'] = self.__files__
return jsonData
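# --- Editor's illustrative usage sketch (not part of DIRAC) ---
# Round-trip an Operation through its JSON serialisation, exercising toJSON()
# and the fromDict branch of the constructor above. The File attribute name
# ('LFN') and the SE names are hypothetical placeholders.
def _demoOperationRoundTrip():
  op = Operation()
  op.Type = 'ReplicateAndRegister'
  op.TargetSE = 'CERN-DST,CNAF-DST'
  op.addFile( File( { 'LFN': '/some/lfn' } ) )
  jsonStr = op.toJSON()['Value']
  clone = Operation( jsonStr )
  return clone.Type == op.Type and clone.targetSEList == op.targetSEList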
|
andresailer/DIRAC
|
RequestManagementSystem/Client/Operation.py
|
Python
|
gpl-3.0
| 10,421
|
[
"DIRAC"
] |
b65e9eb9c832f90c66f4310f2aeb0b94c9c230ddfee469a0ec3d195aa8273e10
|
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import unittest
import copy
import math
import numpy as np
from chemfiles import Frame, UnitCell, Topology, Atom, Residue, ChemfilesError
from chemfiles import BondOrder, CellShape
from _utils import remove_warnings
class TestFrame(unittest.TestCase):
def test_repr(self):
frame = Frame()
self.assertEqual(frame.__repr__(), "Frame with 0 atoms")
frame.resize(4)
self.assertEqual(frame.__repr__(), "Frame with 4 atoms")
self.assertEqual(
frame.atoms.__repr__(), "[Atom(''), Atom(''), Atom(''), Atom('')]"
)
def test_copy(self):
frame = Frame()
cloned = copy.copy(frame)
self.assertEqual(len(frame.atoms), 0)
self.assertEqual(len(cloned.atoms), 0)
frame.resize(6)
self.assertEqual(len(frame.atoms), 6)
self.assertEqual(len(cloned.atoms), 0)
def test_atoms_count(self):
frame = Frame()
self.assertEqual(len(frame.atoms), 0)
frame.resize(4)
self.assertEqual(len(frame.atoms), 4)
frame.remove(2)
self.assertEqual(len(frame.atoms), 3)
def test_add_atom(self):
frame = Frame()
frame.add_atom(Atom("F"), (3, 4, 5))
self.assertEqual(len(frame.atoms), 1)
self.assertEqual(list(frame.positions[0]), [3, 4, 5])
frame.add_velocities()
frame.add_atom(Atom("F"), (-3, -4, 5), (1, 0, 1))
self.assertEqual(list(frame.positions[1]), [-3, -4, 5])
self.assertEqual(list(frame.velocities[1]), [1, 0, 1])
def test_positions(self):
frame = Frame()
frame.resize(4)
expected = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
np.float64,
)
np.copyto(frame.positions, expected)
        self.assertTrue(np.array_equal(frame.positions, expected))
frame.positions[3, 2] = 42
self.assertEqual(frame.positions[3, 2], 42)
# Checking empty frame positions access
_ = Frame().positions
def test_velocities(self):
frame = Frame()
frame.resize(4)
self.assertFalse(frame.has_velocities())
frame.add_velocities()
self.assertTrue(frame.has_velocities())
expected = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
np.float64,
)
np.copyto(frame.velocities, expected)
        self.assertTrue(np.array_equal(frame.velocities, expected))
frame.velocities[3, 2] = 42
self.assertEqual(frame.velocities[3, 2], 42)
# Checking empty frame velocities access
frame = Frame()
frame.add_velocities()
_ = frame.velocities
def test_cell(self):
frame = Frame()
frame.cell = UnitCell([1, 2, 4])
self.assertEqual(frame.cell.lengths, (1, 2, 4))
self.assertEqual(frame.cell.angles, (90, 90, 90))
self.assertEqual(frame.cell.shape, CellShape.Orthorhombic)
frame.cell.lengths = (3, 4, 5)
self.assertEqual(frame.cell.lengths, (3, 4, 5))
def test_topology(self):
frame = Frame()
frame.resize(2)
topology = Topology()
topology.atoms.append(Atom("Zn"))
topology.atoms.append(Atom("Ar"))
frame.topology = topology
self.assertEqual(frame.atoms[0].name, "Zn")
self.assertEqual(frame.atoms[1].name, "Ar")
def test_step(self):
frame = Frame()
self.assertEqual(frame.step, 0)
frame.step = 42
self.assertEqual(frame.step, 42)
def test_out_of_bounds(self):
frame = Frame()
frame.resize(3)
_ = frame.atoms[2]
with self.assertRaises(IndexError):
_ = frame.atoms[6]
def test_iter(self):
frame = Frame()
frame.resize(3)
for i, atom in enumerate(frame.atoms):
self.assertEqual(atom.name, "")
self.assertEqual(i, 2)
for i, atom in enumerate(frame.topology.atoms):
self.assertEqual(atom.name, "")
self.assertEqual(i, 2)
def test_property(self):
frame = Frame()
frame["foo"] = 3
self.assertEqual(frame["foo"], 3.0)
frame["foo"] = False
self.assertEqual(frame["foo"], False)
with remove_warnings:
with self.assertRaises(ChemfilesError):
_ = frame["bar"]
with self.assertRaises(ChemfilesError):
frame[3] = "test"
with self.assertRaises(ChemfilesError):
_ = frame[3]
# Check that enabling indexing/__getitem__ did not enable iteration
with self.assertRaises(TypeError):
for i in frame:
pass
frame["bar"] = "baz"
self.assertEqual(frame.properties_count(), 2)
self.assertEqual(set(frame.list_properties()), {"bar", "foo"})
def test_distance(self):
frame = Frame()
frame.cell = UnitCell([3.0, 4.0, 5.0])
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_atom(Atom(""), (1, 2, 6))
self.assertEqual(frame.distance(0, 1), math.sqrt(6.0))
def test_angle(self):
frame = Frame()
frame.add_atom(Atom(""), (1, 0, 0))
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_atom(Atom(""), (0, 1, 0))
self.assertEqual(frame.angle(0, 1, 2), math.pi / 2.0)
def test_dihedral(self):
frame = Frame()
frame.add_atom(Atom(""), (1, 0, 0))
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_atom(Atom(""), (0, 1, 0))
frame.add_atom(Atom(""), (-1, 1, 0))
self.assertEqual(frame.dihedral(0, 1, 2, 3), math.pi)
def test_out_of_plane(self):
frame = Frame()
frame.add_atom(Atom(""), (1, 0, 0))
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_atom(Atom(""), (0, 1, 0))
frame.add_atom(Atom(""), (0, 0, 3))
self.assertEqual(frame.out_of_plane(1, 3, 0, 2), 3.0)
def test_bonds(self):
frame = Frame()
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_atom(Atom(""), (0, 0, 0))
frame.add_bond(0, 1)
frame.add_bond(3, 4)
frame.add_bond(2, 1, BondOrder.Quintuplet)
        self.assertTrue(
            np.array_equal(frame.topology.bonds, np.array([[0, 1], [1, 2], [3, 4]]))
        )
self.assertEqual(
frame.topology.bonds_orders,
[BondOrder.Unknown, BondOrder.Quintuplet, BondOrder.Unknown],
)
frame.remove_bond(3, 4)
# Also try to remove non-existing bonds
frame.remove_bond(3, 4)
frame.remove_bond(0, 4)
        self.assertTrue(np.array_equal(frame.topology.bonds, np.array([[0, 1], [1, 2]])))
frame.clear_bonds()
self.assertEqual(frame.topology.bonds_count(), 0)
def test_residues(self):
frame = Frame()
frame.add_residue(Residue("Foo"))
frame.add_residue(Residue("Foo"))
frame.add_residue(Residue("Foo"))
self.assertEqual(len(frame.topology.residues), 3)
self.assertEqual(frame.topology.residues[0].name, "Foo")
if __name__ == "__main__":
unittest.main()
|
Luthaf/Chemharp-python
|
tests/frame.py
|
Python
|
mpl-2.0
| 7,425
|
[
"Chemfiles"
] |
1759ffb59b30cbd9b5cbd28bffd40488bc60186bdecc6827ad8323c8434f04e9
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
import threading
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
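# --- Editor's illustrative usage sketch (not part of scikit-learn) ---
# Minimal end-to-end use of the forest classes described in the module
# docstring above; the toy dataset is made up.
def _demo_random_forest():
    from sklearn.ensemble import RandomForestClassifier
    X = [[0, 0], [1, 1], [0, 1], [1, 0]]
    y = [0, 1, 0, 1]
    clf = RandomForestClassifier(n_estimators=25, random_state=0)
    clf.fit(X, y)
    return clf.predict([[0.9, 0.9]]), clf.feature_importances_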
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = np.bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
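# --- Editor's illustrative sketch (not part of scikit-learn) ---
# For a fixed seed, the out-of-bag indices are exactly the sample indices the
# bootstrap draw above never selected.
def _demo_oob_indices(seed=42, n_samples=10):
    sampled = _generate_sample_indices(seed, n_samples)
    oob = _generate_unsampled_indices(seed, n_samples)
    return sorted(set(range(n_samples)) - set(sampled.tolist())) == oob.tolist()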
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where nonzero elements
            indicate that the sample goes through the corresponding node.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # unlike [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
# This is a utility function for joblib's Parallel. It can't go locally in
# ForestClassifier or ForestRegressor, because joblib complains that it cannot
# pickle it when placed there.
def accumulate_prediction(predict, X, out, lock):
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
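# --- Editor's illustrative sketch (not part of scikit-learn) ---
# The lock-protected in-place accumulation above, exercised with stub predict
# callables in place of fitted trees (hypothetical values).
def _demo_accumulate_prediction():
    out = [np.zeros(3)]
    lock = threading.Lock()
    for k in (1.0, 2.0):
        accumulate_prediction(lambda X, check_input=False, _k=k: np.full(3, _k),
                              None, out, lock)
    return out[0].tolist() == [3.0, 3.0, 3.0]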
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
        y_store_unique_indices = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
for j in np.atleast_1d(self.n_classes_)]
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
delayed(accumulate_prediction)(e.predict_proba, X, all_proba, lock)
for e in self.estimators_)
for proba in all_proba:
proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
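# Worked example of the soft-voting rule in ``predict`` above (comment
# only): if two trees report class probabilities [0.6, 0.4] and
# [0.2, 0.8] for a sample, the forest averages them to [0.4, 0.6] and
# predicts the second class, even though the first tree voted otherwise.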
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
if self.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
else:
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
delayed(accumulate_prediction)(e.predict, X, [y_hat], lock)
for e in self.estimators_)
y_hat /= len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.datasets import make_classification
>>>
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = RandomForestClassifier(max_depth=2, random_state=0)
>>> clf.fit(X, y)
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=2, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=0, verbose=0, warm_start=False)
>>> print(clf.feature_importances_)
[ 0.17287856 0.80608704 0.01884792 0.00218648]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of decision
trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Examples
--------
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.datasets import make_regression
>>>
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = RandomForestRegressor(max_depth=2, random_state=0)
>>> regr.fit(X, y)
RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=2,
max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=0, verbose=0, warm_start=False)
>>> print(regr.feature_importances_)
[ 0.17339552 0.81594114 0. 0.01066333]
>>> print(regr.predict([[0, 0, 0, 0]]))
[-2.50699856]
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
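# Minimal usage sketch (an editor's illustration; unlike the random
# forest classes above, this docstring ships no Examples section).
# Dataset sizes and parameters below are arbitrary choices.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    _Xc, _yc = make_classification(n_samples=100, n_features=4,
                                   random_state=0)
    _etc = ExtraTreesClassifier(n_estimators=10, random_state=0)
    _etc.fit(_Xc, _yc)
    _etc.predict([[0, 0, 0, 0]])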
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
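# Minimal usage sketch (editor's illustration, mirroring the classifier
# sketch above; data and parameters are arbitrary).
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    _Xr, _yr = make_regression(n_features=4, n_informative=2,
                               random_state=0)
    _etr = ExtraTreesRegressor(n_estimators=10, random_state=0)
    _etr.fit(_Xr, _yr)
    _etr.predict([[0, 0, 0, 0]])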
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
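# Minimal usage sketch (editor's illustration): each sample is encoded
# by the leaf it reaches in every tree, one-hot encoded, so the output
# width is bounded by ``n_estimators * 2 ** max_depth``.
if __name__ == "__main__":
    _X_emb = np.random.RandomState(0).rand(20, 3)
    _hasher = RandomTreesEmbedding(n_estimators=5, max_depth=3,
                                   random_state=0)
    _X_sparse = _hasher.fit_transform(_X_emb)
    # ``_X_sparse`` is a sparse matrix with 20 rows, at most
    # 5 * 2 ** 3 = 40 columns, and exactly 5 non-zeros per row.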
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/ensemble/forest.py
|
Python
|
mit
| 79,027
|
[
"Brian"
] |
bac84a3ef771ac8a0d0fec9db2d1f8d2d6aa1ceb87dd8a2aa577ede5682dab08
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
for a group of sequences,calculate pairwise identity
then use igraph to find non-redundant sets of sequences
please install python-igraph,BioPython,numpy first
usage: python igraph_maximal_clique_seq.py seq.fa
"""
import sys
import os
import igraph
import numpy as np
from multiprocessing import Pool
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
def readfa(fa_f):
with open(fa_f) as o_f:
lines = o_f.readlines()
lines = [line.rstrip('\r\n') for line in lines]
begin = [i for i,line in enumerate(lines) if '>' in line]
seqs = [lines[b:e] for b,e in zip(begin,begin[1:]+[len(lines)])]
seqs = [(seq[0][1:],''.join(seq[1:])) for seq in seqs]
return seqs
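# readfa example (comment only): a file containing
#   >s1
#   MKV
#   LLT
#   >s2
#   MAA
# parses to [('s1', 'MKVLLT'), ('s2', 'MAA')]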
def align_pair(seq1, seq2):
    # serial reference implementation; superseded in practice by the
    # multiprocessing align/get_similarity pair defined below
    matrix = matlist.blosum62
    gap_open = -10  # usual value
    gap_extend = -0.5  # usual value
    alns = pairwise2.align.globalds(seq1, seq2, matrix, gap_open, gap_extend)
    seq1 = alns[0][0]
    seq2 = alns[0][1]
    identity = [1 for i, s in enumerate(seq1) if s == seq2[i]]
    identity = 1.0 * len(identity) / len(seq1)
    return float('{0:<4.2f}'.format(identity))
def get_similarity_serial(seqs):
    # serial O(n^2) fallback kept for reference
    scores = []
    seqnum = len(seqs)
    for i in range(seqnum):
        score_i = []
        for j in range(seqnum):
            if j < i:
                score_i.append(scores[j][i])
            elif j > i:
                score_i.append(align_pair(seqs[i][1], seqs[j][1]))
            else:
                score_i.append(1.0)
        scores.append(score_i)
    return scores
def align(p):
s1,s2,seq1,seq2 = p
matrix = matlist.blosum62
gap_open = -10 # usual value
gap_extend = -0.5 # usual value
alns = pairwise2.align.globalds(seq1, seq2, matrix, gap_open, gap_extend)
seq1 = alns[0][0]
seq2 = alns[0][1]
identity = [1 for i, s in enumerate(seq1) if s == seq2[i]]
identity = 1.0 * len(identity)/ len(seq1)
return s1,s2,float('{0:<4.2f}'.format(identity))
def get_similarity(seqs):
seq_pairs = []
seq_num = len(seqs)
for i in range(seq_num):
for j in range(seq_num):
if j > i:
seq_pairs.append((i,j,seqs[i][1],seqs[j][1]))
p = Pool(6)
results = p.map(align,seq_pairs)
p.close()
results = sorted(results)
scores = np.ones(shape=(seq_num,seq_num))
for i,j,s in results:
scores[i][j] = s
scores[j][i] = s
return scores
def igraph_mc(p):
labels,scores,fname,cutoff = p
    # to find non-redundant sets with cliques, link sequences whose pairwise
    # identity is below the cutoff; a maximal clique is then a set of
    # mutually non-redundant sequences
adj_m = [map(lambda x: 1 if x < cutoff else 0,row) for row in scores]
for i in range(len(adj_m)):
adj_m[i][i] = 0
g = igraph.Graph.Adjacency(adj_m,mode='undirected')
igraph.plot(g,fname+'_igraph.png')
indexes = g.largest_cliques()
mc_labels = [[labels[i] for i in index] for index in indexes ]
    print 'after removing redundant seqs','\t',cutoff,'\t',len(mc_labels[0])
return fname,mc_labels
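# Worked toy example of the complement trick above (comment only):
# with cutoff 0.5 and a score matrix
#   1.0 0.8 0.3
#   0.8 1.0 0.4
#   0.3 0.4 1.0
# only the pairs (0,2) and (1,2) fall below the cutoff and get edges,
# so the largest cliques are {0,2} and {1,2}: sequences 0 and 1 are
# too similar (0.8) to appear together in a non-redundant set.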
def main():
seqs = readfa(sys.argv[-1])
labels = [pro for pro,_ in seqs]
scores = get_similarity(seqs)
fname = os.path.split(sys.argv[-1])[1].split('.')[0]+'_seq_'
parameters = [[labels,scores,fname+'_nr_seqs_'+str(cutoff),cutoff] for cutoff in [0.3,0.4,0.5,0.6,0.7,0.8]]
p = Pool(6)
results = p.map(igraph_mc,parameters)
p.close()
for fname,mc_labels in results:
if mc_labels:
for i,mc_label in enumerate(mc_labels):
mc_seqs = [(pro,seq) for pro,seq in seqs if pro in mc_label]
                # index the output file per clique so that multiple
                # largest cliques do not overwrite each other
                with open(fname + '_' + str(i) + '.fa', 'w') as w_f:
for pro,seq in mc_seqs:
print >> w_f,'>{0}'.format(pro)
s = [seq[k:k+80] for k in range(0,len(seq),80)]
for si in s:
print >> w_f,si
if __name__ == "__main__":
main()
|
lituan/tools
|
igraph_maximal_clique_seq.py
|
Python
|
cc0-1.0
| 3,920
|
[
"Biopython"
] |
f211dbc0ded87260b31d65a949f113bbc8c9c479a782ba7c83aa02016ce4348c
|
#!/usr/bin/env python
"""
This data and module are in the public domain.
http://inamidst.com/phenny/
"""
data = (
("AYGA", -6.08166666667, 145.391666667),
("AYMD", -5.20694444444, 145.788611111),
("AYMH", -5.82611111111, 144.296111111),
("AYNZ", -6.56972222222, 146.726111111),
("AYPY", -9.44333333333, 147.22),
("AYWK", -3.58361111111, 143.669166667),
("BGBW", 61.1611111111, -45.4275),
("BGCO", 70.7394444444, -22.6458333333),
("BGGH", 64.1908333333, -51.6780555556),
("BGJN", 69.2333333333, -51.0666666667),
("BGKK", 65.5833333333, -37.15),
("BGSF", 67.0169444444, -50.6891666667),
("BGTL", 76.5311111111, -68.7030555556),
("BIAR", 65.6597222222, -18.0725),
("BIEG", 65.2833333333, -14.4013888889),
("BIHN", 64.2955555556, -15.2272222222),
("BIHU", 65.9522222222, -17.4258333333),
("BIIS", 66.0580555556, -23.1352777778),
("BIKF", 63.985, -22.6055555556),
("BIKP", 66.3136111111, -16.4611111111),
("BIPA", 65.5558333333, -23.965),
("BIRK", 64.13, -21.9405555556),
("BISI", 66.1333333333, -18.9166666667),
("BIVM", 63.4241666667, -20.2786111111),
("CYAM", 46.485, -84.5094444444),
("CYAV", 50.0563888889, -97.0325),
("CYAW", 44.6397222222, -63.4994444444),
("CYAY", 51.3916666667, -56.0844444444),
("CYAZ", 49.0822222222, -125.7725),
("CYBB", 68.5344444444, -89.8080555556),
("CYBC", 49.1322222222, -68.2072222222),
("CYBG", 48.3305555556, -70.9963888889),
("CYBK", 64.2988888889, -96.0777777778),
("CYBL", 49.9508333333, -125.270833333),
("CYBR", 49.91, -99.9519444444),
("CYCB", 69.1080555556, -105.138333333),
("CYCD", 49.0522222222, -123.87),
("CYCG", 49.2963888889, -117.6325),
("CYCH", 47.0077777778, -65.4491666667),
("CYCL", 47.9905555556, -66.3313888889),
("CYCO", 67.8166666667, -115.143888889),
("CYCT", 52.075, -111.445277778),
("CYCW", 49.1527777778, -121.938888889),
("CYCY", 70.4861111111, -68.5166666667),
("CYCZ", 50.3319444444, -115.873611111),
("CYDA", 64.0430555556, -139.127777778),
("CYDB", 61.3711111111, -139.040555556),
("CYDC", 49.4675, -120.511944444),
("CYDF", 49.2108333333, -57.3913888889),
("CYDL", 58.4222222222, -130.032222222),
("CYDN", 51.1008333333, -100.0525),
("CYDQ", 55.7416666667, -120.181944444),
("CYED", 53.6666666667, -113.466666667),
("CYEG", 53.3097222222, -113.579722222),
("CYEK", 61.0941666667, -94.0708333333),
("CYEN", 49.2102777778, -102.965833333),
("CYET", 53.5788888889, -116.465),
("CYEU", 79.9947222222, -85.8133333333),
("CYEV", 68.3041666667, -133.482777778),
("CYFB", 63.7563888889, -68.5558333333),
("CYFC", 45.8694444444, -66.5316666667),
("CYFE", 48.7461111111, -69.0972222222),
("CYFO", 54.6780555556, -101.681666667),
("CYFR", 61.1808333333, -113.689722222),
("CYFS", 61.7602777778, -121.236666667),
("CYGK", 44.2252777778, -76.5969444444),
("CYGL", 53.6252777778, -77.7041666667),
("CYGP", 48.7752777778, -64.4786111111),
("CYGQ", 49.7783333333, -86.9394444444),
("CYGR", 47.4247222222, -61.7780555556),
("CYGW", 55.2833333333, -77.7666666667),
("CYGX", 56.35, -94.7),
("CYHB", 52.8166666667, -102.311388889),
("CYHD", 49.8316666667, -92.7441666667),
("CYHI", 70.7630555556, -117.805833333),
("CYHK", 68.6355555556, -95.8497222222),
("CYHM", 43.1730555556, -79.935),
("CYHU", 45.5175, -73.4169444444),
("CYHY", 60.8397222222, -115.782777778),
("CYHZ", 44.8808333333, -63.5086111111),
("CYIB", 48.7738888889, -91.6386111111),
("CYIO", 72.6833333333, -77.9666666667),
("CYJN", 45.2944444444, -73.2811111111),
("CYJT", 48.5441666667, -58.55),
("CYKA", 50.7022222222, -120.441944444),
("CYKF", 43.4588888889, -80.3844444444),
("CYKL", 54.805, -66.8052777778),
("CYKY", 51.5175, -109.180833333),
("CYKZ", 43.8622222222, -79.37),
("CYLD", 47.82, -83.3463888889),
("CYLJ", 54.1252777778, -108.522777778),
("CYLL", 53.3091666667, -110.0725),
("CYLT", 82.5177777778, -62.2805555556),
("CYLW", 49.9561111111, -119.377777778),
("CYMA", 63.6166666667, -135.866666667),
("CYMJ", 50.3302777778, -105.559166667),
("CYMM", 56.6533333333, -111.221944444),
("CYMO", 51.2911111111, -80.6077777778),
("CYMW", 46.2744444444, -75.99),
("CYMX", 45.6797222222, -74.0386111111),
("CYNA", 50.1897222222, -61.7891666667),
("CYND", 45.5213888889, -75.5641666667),
("CYNM", 49.7616666667, -77.8027777778),
("CYOC", 67.5705555556, -139.839166667),
("CYOD", 54.405, -110.279444444),
("CYOJ", 58.6213888889, -117.164722222),
("CYOW", 45.3225, -75.6691666667),
("CYPA", 53.2141666667, -105.672777778),
("CYPE", 56.2269444444, -117.447222222),
("CYPG", 49.9027777778, -98.2747222222),
("CYPK", 49.2161111111, -122.71),
("CYPL", 51.4463888889, -90.2141666667),
("CYPN", 49.8363888889, -64.2886111111),
("CYPQ", 44.23, -78.3633333333),
("CYPR", 54.2861111111, -130.444722222),
("CYPY", 58.7672222222, -111.117222222),
("CYQA", 44.9747222222, -79.3033333333),
("CYQB", 46.7883333333, -71.3975),
("CYQF", 52.1786111111, -113.893055556),
("CYQG", 42.2755555556, -82.9555555556),
("CYQH", 60.1177777778, -128.821944444),
("CYQK", 49.7883333333, -94.3630555556),
("CYQL", 49.6302777778, -112.799722222),
("CYQM", 46.1122222222, -64.6786111111),
("CYQN", 50.1827777778, -86.6963888889),
("CYQQ", 49.7108333333, -124.886666667),
("CYQR", 50.4319444444, -104.665833333),
("CYQT", 48.3719444444, -89.3238888889),
("CYQU", 55.1797222222, -118.885),
("CYQV", 51.2647222222, -102.461666667),
("CYQW", 52.7691666667, -108.243611111),
("CYQX", 48.9369444444, -54.5680555556),
("CYQY", 46.1613888889, -60.0477777778),
("CYQZ", 53.0261111111, -122.51),
("CYRB", 74.7169444444, -94.9694444444),
("CYRI", 47.7644444444, -69.5847222222),
("CYRJ", 48.52, -72.2655555556),
("CYRM", 52.4297222222, -114.904166667),
("CYRT", 62.8113888889, -92.1158333333),
("CYSB", 46.625, -80.7988888889),
("CYSC", 45.4380555556, -71.6905555556),
("CYSJ", 45.3161111111, -65.8902777778),
("CYSM", 60.0222222222, -111.960277778),
("CYSR", 72.9822222222, -84.6136111111),
("CYSU", 46.4427777778, -63.8311111111),
("CYSY", 71.9938888889, -125.2425),
("CYTE", 64.23, -76.5266666667),
("CYTH", 55.8011111111, -97.8641666667),
("CYTR", 44.1188888889, -77.5280555556),
("CYTS", 48.5697222222, -81.3766666667),
("CYTZ", 43.6275, -79.3961111111),
("CYUB", 69.4333333333, -133.026388889),
("CYUL", 45.4680555556, -73.7413888889),
("CYUT", 66.5213888889, -86.2247222222),
("CYUX", 68.7761111111, -81.2436111111),
("CYUY", 48.2061111111, -78.8355555556),
("CYVC", 55.1513888889, -105.261944444),
("CYVG", 53.3558333333, -110.823888889),
("CYVM", 67.5458333333, -64.0313888889),
("CYVO", 48.0533333333, -77.7827777778),
("CYVP", 58.0961111111, -68.4269444444),
("CYVQ", 65.2825, -126.800277778),
("CYVR", 49.195, -123.181944444),
("CYVT", 55.8419444444, -108.4175),
("CYVV", 44.7458333333, -81.1072222222),
("CYWA", 45.9522222222, -77.3191666667),
("CYWG", 49.91, -97.2344444444),
("CYWK", 52.9219444444, -66.8644444444),
("CYWL", 52.1830555556, -122.054166667),
("CYWY", 63.2094444444, -123.436666667),
("CYXC", 49.6102777778, -115.7825),
("CYXD", 53.5725, -113.520555556),
("CYXE", 52.1708333333, -106.699722222),
("CYXH", 50.0188888889, -110.720833333),
("CYXJ", 56.2380555556, -120.740277778),
("CYXL", 50.1144444444, -91.9041666667),
("CYXP", 66.145, -65.7136111111),
("CYXR", 47.695, -79.8488888889),
("CYXS", 53.8894444444, -122.678888889),
("CYXT", 54.4663888889, -128.5775),
("CYXU", 43.0355555556, -81.1538888889),
("CYXX", 49.0252777778, -122.363333333),
("CYXY", 60.7094444444, -135.068333333),
("CYYB", 46.3636111111, -79.4227777778),
("CYYC", 51.1138888889, -114.020277778),
("CYYD", 54.8247222222, -127.182777778),
("CYYE", 58.8363888889, -122.596944444),
("CYYF", 49.4627777778, -119.602222222),
("CYYG", 46.29, -63.1211111111),
("CYYH", 69.5466666667, -93.5766666667),
("CYYJ", 48.6469444444, -123.425833333),
("CYYL", 56.8638888889, -101.076111111),
("CYYN", 50.2919444444, -107.690555556),
("CYYQ", 58.7391666667, -94.065),
("CYYR", 53.3191666667, -60.4258333333),
("CYYT", 47.6186111111, -52.7519444444),
("CYYU", 49.4138888889, -82.4675),
("CYYW", 50.2902777778, -88.9097222222),
("CYYY", 48.6086111111, -68.2080555556),
("CYYZ", 43.6772222222, -79.6305555556),
("CYZD", 43.7425, -79.4655555556),
("CYZE", 45.8852777778, -82.5677777778),
("CYZF", 62.4627777778, -114.440277778),
("CYZH", 55.2933333333, -114.778333333),
("CYZP", 53.2541666667, -131.813888889),
("CYZR", 42.9994444444, -82.3088888889),
("CYZT", 50.6805555556, -127.366666667),
("CYZU", 54.1438888889, -115.786666667),
("CYZV", 50.2233333333, -66.2655555556),
("CYZW", 60.1727777778, -132.742777778),
("CYZX", 44.9844444444, -64.9169444444),
("CZFA", 62.2075, -133.375833333),
("CZFM", 67.4077777778, -134.860277778),
("DAAB", 36.5036111111, 2.81416666667),
("DAAD", 35.3325, 4.20638888889),
("DAAE", 36.7119444444, 5.06972222222),
("DAAG", 36.6908333333, 3.21527777778),
("DAAJ", 24.2925, 9.45222222222),
("DAAK", 36.5458333333, 2.87611111111),
("DAAM", 36.1086111111, 6.36444444444),
("DAAN", 26.71, 0.285555555556),
("DAAP", 26.5733333333, 8.48361111111),
("DAAQ", 35.5252777778, 2.87861111111),
("DAAS", 36.1780555556, 5.32444444444),
("DAAT", 22.8108333333, 5.45083333333),
("DAAV", 36.795, 5.87333333333),
("DAAY", 33.5358333333, -0.242222222222),
("DAAZ", 35.7522222222, 0.626111111111),
("DABB", 36.8222222222, 7.80916666667),
("DABC", 36.2766666667, 6.62388888889),
("DABS", 35.4313888889, 8.12055555556),
("DAFH", 32.9297222222, 3.31222222222),
("DAOB", 35.3411111111, 1.46305555556),
("DAOE", 35.7352777778, -0.805277777778),
("DAOF", 27.7002777778, -8.16694444444),
("DAOI", 36.2125, 1.33166666667),
("DAOL", 35.5422222222, -0.532222222222),
("DAON", 35.0166666667, -1.45),
("DAOO", 35.6236111111, -0.621111111111),
("DAOS", 35.1716666667, -0.593055555556),
("DAOV", 35.2075, 0.146944444444),
("DAUA", 27.8375, -0.186388888889),
("DAUB", 34.7930555556, 5.73805555556),
("DAUE", 30.5711111111, 2.85944444444),
("DAUG", 32.3838888889, 3.79388888889),
("DAUH", 31.6727777778, 6.14027777778),
("DAUI", 27.2508333333, 2.51194444444),
("DAUK", 33.0677777778, 6.08861111111),
("DAUL", 33.7638888889, 2.92722222222),
("DAUT", 29.2369444444, 0.275833333333),
("DAUU", 31.9172222222, 5.41277777778),
("DAUZ", 28.0513888889, 9.64277777778),
("DBBB", 6.35722222222, 2.38416666667),
("DBBP", 9.35694444444, 2.60888888889),
("DFFD", 12.3530555556, -1.51222222222),
("DFOO", 11.16, -4.33083333333),
("DGAA", 5.60277777778, -0.168055555556),
("DGLE", 9.56333333333, -0.863333333333),
("DGLW", 10.0825, -2.5075),
("DGSN", 7.36166666667, -2.32861111111),
("DGTK", 4.89333333333, -1.775),
("DIAP", 5.25972222222, -3.92638888889),
("DIBK", 7.73861111111, -5.07361111111),
("DIDL", 6.7925, -6.47305555556),
("DIKO", 9.38694444444, -5.55638888889),
("DIMN", 7.27194444444, -7.58694444444),
("DISP", 4.74666666667, -6.66055555556),
("DIYO", 6.90305555556, -5.36583333333),
("DNAA", 9.00666666667, 7.26305555556),
("DNAK", 7.24666666667, 5.30083333333),
("DNBE", 6.31722222222, 5.59944444444),
("DNCA", 4.97583333333, 8.34694444444),
("DNEN", 6.47416666667, 7.56194444444),
("DNGU", 12.1716666667, 6.69611111111),
("DNIB", 7.36222222222, 3.97833333333),
("DNIL", 8.44, 4.49388888889),
("DNJO", 9.63972222222, 8.86888888889),
("DNKA", 10.6958333333, 7.32),
("DNKN", 12.0475, 8.52444444444),
("DNMA", 11.8552777778, 13.0808333333),
("DNMK", 7.70361111111, 8.61388888889),
("DNMM", 6.57722222222, 3.32111111111),
("DNMN", 9.65194444444, 6.46222222222),
("DNPO", 5.01527777778, 6.94944444444),
("DNSO", 12.9161111111, 5.20694444444),
("DNYO", 9.26027777778, 12.4297222222),
("DNZA", 11.13, 7.68555555556),
("DRRM", 13.5025, 7.12666666667),
("DRRN", 13.4813888889, 2.18361111111),
("DRRT", 14.8755555556, 5.26527777778),
("DRZA", 16.9647222222, 7.99694444444),
("DRZD", 18.9686111111, 12.8686111111),
("DRZF", 13.3727777778, 12.6266666667),
("DRZR", 13.7788888889, 8.98361111111),
("DRZT", 14.9994444444, 8.76694444444),
("DTMB", 35.7580555556, 10.7547222222),
("DTTA", 36.8508333333, 10.2269444444),
("DTTB", 37.2452777778, 9.79138888889),
("DTTD", 32.3061111111, 10.3819444444),
("DTTF", 34.4219444444, 8.8225),
("DTTG", 33.8766666667, 10.1033333333),
("DTTI", 36.7211111111, 9.94305555556),
("DTTJ", 33.875, 10.7752777778),
("DTTR", 31.7041666667, 9.25444444444),
("DTTX", 34.7177777778, 10.6908333333),
("DTTZ", 33.9397222222, 8.11055555556),
("DXNG", 9.76722222222, 1.09111111111),
("DXXX", 6.16555555556, 1.25388888889),
("EBAW", 51.19, 4.46277777778),
("EBBE", 50.7586111111, 4.76833333333),
("EBBL", 51.1677777778, 5.47083333333),
("EBBR", 50.9022222222, 4.49861111111),
("EBBT", 51.3333333333, 4.5),
("EBBX", 49.8872222222, 5.22861111111),
("EBCI", 50.4591666667, 4.45361111111),
("EBCV", 50.5758333333, 3.83083333333),
("EBFN", 51.09, 2.65277777778),
("EBFS", 50.2436111111, 4.64861111111),
("EBKT", 50.8177777778, 3.20833333333),
("EBLG", 50.6372222222, 5.44305555556),
("EBOS", 51.1988888889, 2.86222222222),
("EBSL", 50.9483333333, 5.59166666667),
("EBST", 50.7883333333, 5.19277777778),
("EBUL", 51.1438888889, 3.47416666667),
("EBWE", 51.395, 4.96055555556),
("EBZR", 51.2655555556, 4.75472222222),
("EDAB", 51.1933333333, 14.5197222222),
("EDAC", 50.9816666667, 12.5061111111),
("EDAD", 51.8319444444, 12.1858333333),
("EDAE", 52.1972222222, 14.5855555556),
("EDAH", 53.8786111111, 14.1522222222),
("EDAK", 51.3080555556, 13.5547222222),
("EDAM", 51.3627777778, 11.9408333333),
("EDAQ", 51.5519444444, 12.0525),
("EDAU", 51.2944444444, 13.3588888889),
("EDAX", 53.3063888889, 12.7530555556),
("EDAY", 52.5797222222, 13.9155555556),
("EDAZ", 52.2033333333, 13.1586111111),
("EDBC", 51.8558333333, 11.4180555556),
("EDBG", 35.4605555556, -77.9647222222),
("EDBH", 54.3380555556, 12.71),
("EDBJ", 50.9172222222, 11.7136111111),
("EDBK", 52.9186111111, 12.4252777778),
("EDBM", 52.0736111111, 11.6263888889),
("EDBN", 51.3280555556, 12.6566666667),
("EDBR", 51.3644444444, 14.9519444444),
("EDCA", 53.8325, 13.6688888889),
("EDCD", 51.8894444444, 14.5316666667),
("EDCK", 51.7211111111, 11.9616666667),
("EDCM", 51.2961111111, 14.1288888889),
("EDDB", 52.38, 13.5225),
("EDDC", 51.1325, 13.7669444444),
("EDDE", 50.98, 10.9580555556),
("EDDF", 50.0263888889, 8.54305555556),
("EDDG", 52.1344444444, 7.68472222222),
("EDDH", 53.6302777778, 9.98805555556),
("EDDI", 52.4727777778, 13.4038888889),
("EDDK", 50.8658333333, 7.1425),
("EDDL", 51.2894444444, 6.76666666667),
("EDDM", 48.3536111111, 11.7858333333),
("EDDN", 49.4986111111, 11.0780555556),
("EDDP", 51.4238888889, 12.2361111111),
("EDDR", 49.2144444444, 7.10944444444),
("EDDS", 48.6897222222, 9.22194444444),
("EDDT", 52.5594444444, 13.2875),
("EDDV", 52.4608333333, 9.685),
("EDDW", 53.0475, 8.78666666667),
("EDFE", 49.9608333333, 8.64361111111),
("EDFH", 49.9497222222, 7.26388888889),
("EDFM", 49.4725, 8.51361111111),
("EDFQ", 51.0352777778, 8.67888888889),
("EDFV", 49.6063888889, 8.36833333333),
("EDFZ", 49.9688888889, 8.1475),
("EDGE", 50.9927777778, 10.4725),
("EDGS", 50.7075, 8.08194444444),
("EDHI", 53.5352777778, 9.83527777778),
("EDHK", 54.3794444444, 10.145),
("EDHL", 53.8052777778, 10.7191666667),
("EDKA", 50.8227777778, 6.18722222222),
("EDKV", 50.4058333333, 6.52805555556),
("EDKZ", 51.0994444444, 7.60194444444),
("EDLA", 51.4833333333, 7.89916666667),
("EDLC", 51.5302777778, 6.53694444444),
("EDLE", 51.4013888889, 6.93583333333),
("EDLN", 51.2302777778, 6.50444444444),
("EDLP", 51.6141666667, 8.61611111111),
("EDLS", 51.9958333333, 6.84027777778),
("EDLW", 51.5180555556, 7.61222222222),
("EDMA", 48.425, 10.9316666667),
("EDMB", 48.1108333333, 9.76277777778),
("EDME", 48.3961111111, 12.7236111111),
("EDMO", 48.0813888889, 11.2833333333),
("EDMS", 48.9008333333, 12.5180555556),
("EDMV", 48.6363888889, 13.1952777778),
("EDNL", 47.8588888889, 10.0144444444),
("EDNY", 47.6711111111, 9.51138888889),
("EDOP", 53.4269444444, 11.7833333333),
("EDOV", 52.6288888889, 11.8197222222),
("EDPA", 48.7777777778, 10.2644444444),
("EDQC", 50.2625, 10.9958333333),
("EDQD", 49.9841666667, 11.6383333333),
("EDQE", 49.7941666667, 11.1322222222),
("EDQM", 50.2886111111, 11.8547222222),
("EDQP", 49.8627777778, 11.7877777778),
("EDQT", 50.0177777778, 10.5294444444),
("EDRK", 50.3247222222, 7.53083333333),
("EDRT", 49.8633333333, 6.78888888889),
("EDRY", 49.3025, 8.45111111111),
("EDRZ", 49.2094444444, 7.40055555556),
("EDTB", 48.7911111111, 8.18694444444),
("EDTD", 47.9730555556, 8.52222222222),
("EDTF", 48.0202777778, 7.83361111111),
("EDTK", 48.9822222222, 8.33333333333),
("EDTM", 48.0536111111, 9.37277777778),
("EDTY", 49.1180555556, 9.77722222222),
("EDUS", 51.6075, 13.7377777778),
("EDVE", 52.3191666667, 10.5561111111),
("EDVK", 51.4083333333, 9.3775),
("EDVM", 52.1775, 9.94555555556),
("EDWB", 53.5033333333, 8.57333333333),
("EDWD", 53.1430555556, 8.62333333333),
("EDWE", 53.3911111111, 7.22722222222),
("EDWF", 53.2719444444, 7.44277777778),
("EDWI", 53.5047222222, 8.05333333333),
("EDWR", 53.5952777778, 6.70916666667),
("EDWY", 53.7066666667, 7.23),
("EDXF", 54.7716666667, 9.37805555556),
("EDXR", 54.2208333333, 9.60055555556),
("EDXW", 54.9130555556, 8.34027777778),
("EEEI", 59.2594444444, 24.2044444444),
("EEKA", 58.9905555556, 22.8305555556),
("EEKE", 58.2297222222, 22.5094444444),
("EEPU", 58.4188888889, 24.4727777778),
("EETN", 59.4130555556, 24.8327777778),
("EETU", 58.3072222222, 26.6902777778),
("EFET", 68.3625, 23.4241666667),
("EFEU", 61.1161111111, 22.2013888889),
("EFHA", 61.8558333333, 24.7863888889),
("EFHF", 60.2544444444, 25.0427777778),
("EFHK", 60.32, 24.9561111111),
("EFHM", 61.6894444444, 23.0736111111),
("EFHN", 59.8486111111, 23.0833333333),
("EFHV", 60.6544444444, 24.8811111111),
("EFIK", 60.4625, 23.6525),
("EFIM", 61.2491666667, 28.9036111111),
("EFIT", 62.1661111111, 30.0736111111),
("EFIV", 68.6072222222, 27.4052777778),
("EFJO", 62.6588888889, 29.6244444444),
("EFJY", 62.3994444444, 25.6780555556),
("EFKA", 63.1269444444, 23.0513888889),
("EFKE", 65.7816666667, 24.5988888889),
("EFKI", 64.2852777778, 27.6922222222),
("EFKJ", 62.4625, 22.3930555556),
("EFKK", 63.7211111111, 23.1430555556),
("EFKM", 66.7127777778, 27.1566666667),
("EFKS", 65.9875, 29.2391666667),
("EFKT", 67.7008333333, 24.8466666667),
("EFKU", 63.0069444444, 27.7975),
("EFLA", 61.1438888889, 25.6933333333),
("EFLP", 61.0444444444, 28.1441666667),
("EFMA", 60.1219444444, 19.8980555556),
("EFME", 62.9466666667, 23.5188888889),
("EFMI", 61.6863888889, 27.2016666667),
("EFNU", 60.3338888889, 24.2963888889),
("EFOU", 64.93, 25.3544444444),
("EFPI", 61.2455555556, 22.1933333333),
("EFPO", 61.4616666667, 21.7997222222),
("EFPU", 65.4022222222, 26.9469444444),
("EFPY", 63.7316666667, 25.9261111111),
("EFRH", 64.6880555556, 24.6958333333),
("EFRN", 62.0652777778, 28.3563888889),
("EFRO", 66.5647222222, 25.8302777778),
("EFRY", 60.7447222222, 24.1077777778),
("EFSA", 61.9430555556, 28.945),
("EFSE", 61.0622222222, 26.7986111111),
("EFSO", 67.395, 26.6188888889),
("EFTP", 61.4138888889, 23.6041666667),
("EFTS", 61.7733333333, 24.0269444444),
("EFTU", 60.5138888889, 22.2627777778),
("EFUT", 60.8963888889, 26.9383333333),
("EFVA", 63.0511111111, 21.7613888889),
("EFVR", 62.1711111111, 27.8686111111),
("EFYL", 64.0602777778, 24.7158333333),
("EGAA", 54.6575, -6.21583333333),
("EGAB", 54.3988888889, -7.65166666667),
("EGAC", 54.6180555556, -5.8725),
("EGAE", 55.0427777778, -7.16111111111),
("EGBB", 52.4536111111, -1.74777777778),
("EGBE", 52.3697222222, -1.47972222222),
("EGBG", 52.6077777778, -1.03194444444),
("EGBJ", 51.8941666667, -2.16722222222),
("EGBN", 52.92, -1.07916666667),
("EGBO", 52.5175, -2.25972222222),
("EGBP", 51.6680555556, -2.05694444444),
("EGBT", 52.0408333333, -1.09555555556),
("EGCC", 53.3536111111, -2.27472222222),
("EGCD", 53.3380555556, -2.14888888889),
("EGCF", 53.5597222222, -0.858333333333),
("EGDC", 51.0869444444, -4.15027777778),
("EGDG", 50.4405555556, -4.99527777778),
("EGDL", 51.505, -1.99333333333),
("EGDM", 51.1519444444, -1.74722222222),
("EGDR", 50.0858333333, -5.25555555556),
("EGDX", 51.4047222222, -3.43555555556),
("EGDY", 51.0091666667, -2.63861111111),
("EGFE", 51.8330555556, -4.96111111111),
("EGFF", 51.3966666667, -3.34333333333),
("EGFH", 51.6052777778, -4.06777777778),
("EGGD", 51.3825, -2.71888888889),
("EGGP", 53.3336111111, -2.84972222222),
("EGGW", 51.8744444444, -0.368333333333),
("EGHD", 50.4227777778, -4.10583333333),
("EGHH", 50.78, -1.8425),
("EGHI", 50.95, -1.35666666667),
("EGHL", 51.185, -1.03222222222),
("EGJB", 49.4347222222, -2.60194444444),
("EGJJ", 49.2077777778, -2.19527777778),
("EGKA", 50.8355555556, -0.297222222222),
("EGKB", 51.3308333333, 0.0325),
("EGKK", 51.1480555556, -0.190277777778),
("EGLC", 51.505, 0.0541666666667),
("EGLF", 51.2758333333, -0.776111111111),
("EGLJ", 51.6761111111, -1.08083333333),
("EGLK", 51.3238888889, -0.8475),
("EGLL", 51.4775, -0.461388888889),
("EGMC", 51.5713888889, 0.695555555556),
("EGMD", 50.9561111111, 0.939166666667),
("EGMH", 51.3422222222, 1.34611111111),
("EGNB", 53.7194444444, -0.566111111111),
("EGNC", 54.9375, -2.80916666667),
("EGNH", 53.7716666667, -3.02861111111),
("EGNJ", 53.5744444444, -0.350833333333),
("EGNL", 54.1297222222, -3.25611111111),
("EGNM", 53.8658333333, -1.66055555556),
("EGNO", 53.745, -2.88305555556),
("EGNR", 53.1780555556, -2.97777777778),
("EGNS", 54.0833333333, -4.62388888889),
("EGNT", 55.0375, -1.69166666667),
("EGNV", 54.5091666667, -1.42916666667),
("EGNX", 52.8311111111, -1.32805555556),
("EGOD", 52.8116666667, -4.12333333333),
("EGOE", 52.8711111111, -2.53333333333),
("EGOQ", 53.2583333333, -4.37333333333),
("EGOS", 52.7980555556, -2.66777777778),
("EGOV", 53.2480555556, -4.53527777778),
("EGOW", 53.5813888889, -3.05527777778),
("EGOY", 54.8511111111, -4.94777777778),
("EGPA", 58.9580555556, -2.905),
("EGPB", 59.8788888889, -1.29555555556),
("EGPC", 58.4586111111, -3.09277777778),
("EGPD", 57.2041666667, -2.20027777778),
("EGPE", 57.54, -4.05),
("EGPF", 55.8719444444, -4.43305555556),
("EGPH", 55.95, -3.3725),
("EGPI", 55.6819444444, -6.25666666667),
("EGPK", 55.5077777778, -4.58666666667),
("EGPL", 57.4811111111, -7.36277777778),
("EGPM", 60.4322222222, -1.29805555556),
("EGPN", 56.4525, -3.02583333333),
("EGPO", 58.2136111111, -6.32888888889),
("EGPU", 56.4991666667, -6.86916666667),
("EGQL", 56.3727777778, -2.86833333333),
("EGQS", 57.705, -3.33916666667),
("EGRR", 51.3833333333, -0.783333333333),
("EGSC", 52.205, 0.175),
("EGSF", 52.4680555556, -0.251111111111),
("EGSH", 52.6758333333, 1.28277777778),
("EGSS", 51.885, 0.235),
("EGSX", 51.7216666667, 0.154166666667),
("EGSY", 53.3941666667, -1.38833333333),
("EGTC", 52.0722222222, -0.616666666667),
("EGTD", 51.1166666667, -0.534444444444),
("EGTE", 50.7344444444, -3.41388888889),
("EGTG", 51.5194444444, -2.59083333333),
("EGTH", 51.7666666667, 0.25),
("EGTK", 51.8369444444, -1.32),
("EGUB", 51.6161111111, -1.09555555556),
("EGUL", 52.4091666667, 0.560833333333),
("EGUN", 52.3608333333, 0.488333333333),
("EGUW", 52.1272222222, 0.955833333333),
("EGUY", 52.3572222222, -0.107777777778),
("EGVA", 51.6819444444, -1.79),
("EGVN", 51.7497222222, -1.58361111111),
("EGVO", 51.2341666667, -0.942777777778),
("EGWC", 52.64, -2.30555555556),
("EGWU", 51.5527777778, -0.418055555556),
("EGXC", 53.0927777778, -0.165833333333),
("EGXD", 54.1369444444, -1.42),
("EGXE", 54.2922222222, -1.535),
("EGXG", 53.8341666667, -1.19527777778),
("EGXH", 52.3425, 0.772777777778),
("EGXJ", 52.7355555556, -0.648611111111),
("EGXP", 53.3075, -0.550833333333),
("EGXT", 52.6125, -0.476388888889),
("EGXU", 54.0494444444, -1.25194444444),
("EGXW", 53.1661111111, -0.523611111111),
("EGXZ", 54.2055555556, -1.38222222222),
("EGYC", 52.7547222222, 1.35722222222),
("EGYD", 53.0305555556, -0.481111111111),
("EGYE", 52.9622222222, -0.561388888889),
("EGYM", 52.6483333333, 0.550277777778),
("EGYP", -51.8227777778, -58.4472222222),
("EHAM", 52.3086111111, 4.76388888889),
("EHBD", 51.2552777778, 5.60138888889),
("EHBK", 50.9113888889, 5.77),
("EHDL", 52.0605555556, 5.87305555556),
("EHDR", 53.1191666667, 6.12972222222),
("EHEH", 51.45, 5.37444444444),
("EHGG", 53.1194444444, 6.57944444444),
("EHGR", 51.5677777778, 4.93305555556),
("EHKD", 52.9233333333, 4.78055555556),
("EHLE", 52.4602777778, 5.52722222222),
("EHLW", 53.2286111111, 5.76055555556),
("EHRD", 51.9572222222, 4.44166666667),
("EHSB", 52.1269444444, 5.27638888889),
("EHTW", 52.27, 6.87416666667),
("EHVB", 52.1697222222, 4.42611111111),
("EHWO", 51.4488888889, 4.34194444444),
("EICK", 51.8427777778, -8.49194444444),
("EICM", 53.3013888889, -8.93916666667),
("EIDL", 55.0441666667, -8.34083333333),
("EIDW", 53.4211111111, -6.27),
("EIKN", 53.9102777778, -8.81833333333),
("EIKY", 52.1808333333, -9.52361111111),
("EIME", 53.3027777778, -6.44277777778),
("EINN", 52.7019444444, -8.92472222222),
("EISG", 54.28, -8.59916666667),
("EIWF", 52.1869444444, -7.08694444444),
("EKAH", 56.3041666667, 10.6194444444),
("EKBI", 55.7402777778, 9.15166666667),
("EKCH", 55.6177777778, 12.6558333333),
("EKEB", 55.5258333333, 8.55333333333),
("EKGH", 55.9411111111, 12.3822222222),
("EKHO", 56.3966666667, 8.44333333333),
("EKKA", 56.2972222222, 9.12444444444),
("EKLS", 57.2777777778, 11.0013888889),
("EKMB", 54.6991666667, 11.44),
("EKOD", 55.4761111111, 10.3291666667),
("EKPB", 54.8702777778, 9.27916666667),
("EKRK", 55.5855555556, 12.1313888889),
("EKRN", 55.0630555556, 14.7594444444),
("EKSB", 54.9641666667, 9.79166666667),
("EKSN", 57.5033333333, 10.2291666667),
("EKSP", 55.2252777778, 9.26388888889),
("EKSV", 56.55, 9.17277777778),
("EKTS", 57.0686111111, 8.705),
("EKVA", 55.6969444444, 9.19333333333),
("EKVD", 55.4361111111, 9.33083333333),
("EKVG", 62.0636111111, -7.27694444444),
("EKVH", 56.8469444444, 9.45861111111),
("EKVJ", 55.99, 8.35388888889),
("EKVL", 55.7672222222, 12.3433333333),
("EKYT", 57.0927777778, 9.84888888889),
("ELLX", 49.6263888889, 6.21138888889),
("ENAL", 62.5602777778, 6.11),
("ENAN", 69.2925, 16.1441666667),
("ENAT", 69.9769444444, 23.3661111111),
("ENBL", 61.3925, 5.76416666667),
("ENBM", 60.6386111111, 6.50138888889),
("ENBN", 65.4591666667, 12.2136111111),
("ENBO", 67.2688888889, 14.3633333333),
("ENBR", 60.2933333333, 5.21805555556),
("ENBS", 70.6, 29.6925),
("ENCN", 58.2041666667, 8.08527777778),
("ENDI", 60.4166666667, 8.51388888889),
("ENDU", 69.0555555556, 18.5402777778),
("ENEV", 68.4911111111, 16.6780555556),
("ENFB", 59.8955555556, 10.6169444444),
("ENFG", 61.0111111111, 9.29305555556),
("ENFL", 61.5838888889, 5.025),
("ENGM", 60.1938888889, 11.1002777778),
("ENHA", 60.8183333333, 11.0672222222),
("ENHD", 59.345, 5.20833333333),
("ENHK", 70.4866666667, 22.1397222222),
("ENKB", 63.1116666667, 7.82444444444),
("ENKJ", 59.9691666667, 11.0358333333),
("ENKR", 69.725, 29.8875),
("ENLI", 58.1002777778, 6.625),
("ENML", 62.7461111111, 7.2725),
("ENMS", 65.7838888889, 13.2147222222),
("ENNA", 70.0686111111, 24.9733333333),
("ENNO", 59.5655555556, 9.21166666667),
("ENOL", 63.6988888889, 9.60388888889),
("ENRO", 62.5783333333, 11.3422222222),
("ENRY", 59.3788888889, 10.7855555556),
("ENSB", 78.2461111111, 15.4655555556),
("ENSG", 61.1561111111, 7.13638888889),
("ENSN", 59.185, 9.56694444444),
("ENSO", 59.7916666667, 5.34083333333),
("ENSR", 69.7869444444, 20.9594444444),
("ENSS", 70.3552777778, 31.0447222222),
("ENST", 65.9566666667, 12.4688888889),
("ENTC", 69.6833333333, 18.9188888889),
("ENTO", 59.1866666667, 10.2586111111),
("ENVA", 63.4575, 10.9397222222),
("ENZV", 58.8766666667, 5.63777777778),
("EPGD", 54.3775, 18.4661111111),
("EPKK", 50.0775, 19.7847222222),
("EPKM", 50.2383333333, 19.035),
("EPKO", 54.0422222222, 16.2636111111),
("EPKT", 50.4741666667, 19.08),
("EPLL", 51.7219444444, 19.3980555556),
("EPML", 50.3222222222, 21.4619444444),
("EPPO", 52.4211111111, 16.8263888889),
("EPRZ", 50.11, 22.0188888889),
("EPSC", 53.5847222222, 14.9019444444),
("EPSD", 53.3919444444, 14.6336111111),
("EPSK", 54.4788888889, 17.1075),
("EPSN", 53.7905555556, 15.8280555556),
("EPWA", 52.1655555556, 20.9669444444),
("EPWR", 51.1025, 16.8858333333),
("EPZG", 52.1386111111, 15.7986111111),
("ESCF", 58.4022222222, 15.5255555556),
("ESCK", 58.6108333333, 16.1033333333),
("ESCM", 59.8972222222, 17.5883333333),
("ESCN", 59.1808333333, 17.9122222222),
("ESDB", 56.2958333333, 12.8469444444),
("ESDF", 56.2666666667, 15.265),
("ESFH", 58.4091666667, 13.2625),
("ESFI", 56.1836111111, 14.1322222222),
("ESFM", 58.5980555556, 14.1136111111),
("ESFQ", 56.8441666667, 15.4525),
("ESFR", 58.4980555556, 13.0530555556),
("ESFY", 56.785, 13.6016666667),
("ESGG", 57.6627777778, 12.2797222222),
("ESGJ", 57.7575, 14.0686111111),
("ESGK", 58.1697222222, 13.5877777778),
("ESGL", 58.4652777778, 13.1741666667),
("ESGP", 57.7747222222, 11.8702777778),
("ESGR", 58.4563888889, 13.9725),
("ESGT", 58.3180555556, 12.345),
("ESIA", 58.5136111111, 14.5069444444),
("ESIB", 58.4263888889, 12.7141666667),
("ESKA", 60.1327777778, 18.105),
("ESKB", 59.4186111111, 17.8905555556),
("ESKK", 59.3458333333, 14.4958333333),
("ESKM", 60.9577777778, 14.5111111111),
("ESKN", 58.7886111111, 16.9119444444),
("ESKS", 59.3138888889, 17.1091666667),
("ESKV", 59.6758333333, 12.6391666667),
("ESKX", 58.7908333333, 16.5708333333),
("ESMA", 56.6105555556, 15.6047222222),
("ESMG", 56.9502777778, 13.9216666667),
("ESMK", 55.9216666667, 14.0852777778),
("ESML", 55.9447222222, 12.8608333333),
("ESMO", 57.3502777778, 16.4977777778),
("ESMP", 57.2641666667, 13.5991666667),
("ESMQ", 56.6852777778, 16.2875),
("ESMS", 55.53, 13.3713888889),
("ESMT", 56.6911111111, 12.82),
("ESMV", 57.2922222222, 14.1372222222),
("ESMX", 56.9288888889, 14.7277777778),
("ESNA", 63.7383333333, 15.4583333333),
("ESNC", 62.4088888889, 13.7472222222),
("ESND", 62.0477777778, 14.4227777778),
("ESNF", 61.8980555556, 15.7052777778),
("ESNG", 67.1322222222, 20.8144444444),
("ESNH", 61.7680555556, 17.0805555556),
("ESNI", 63.6336111111, 17.9397222222),
("ESNJ", 66.4961111111, 20.1469444444),
("ESNK", 63.0483333333, 17.7686111111),
("ESNL", 64.5480555556, 18.7161111111),
("ESNM", 63.1286111111, 14.8027777778),
("ESNN", 62.5280555556, 17.4438888889),
("ESNO", 63.4083333333, 18.99),
("ESNP", 65.3994444444, 21.2652777778),
("ESNQ", 67.8219444444, 20.3366666667),
("ESNR", 61.19, 14.7125),
("ESNS", 64.6247222222, 21.0766666667),
("ESNT", 62.4811111111, 17.0027777778),
("ESNU", 63.7916666667, 20.2825),
("ESNV", 64.5788888889, 16.8333333333),
("ESNX", 65.59, 19.2816666667),
("ESOE", 59.2236111111, 15.0377777778),
("ESOH", 60.02, 13.5788888889),
("ESOW", 59.5894444444, 16.6336111111),
("ESPA", 65.5436111111, 22.1219444444),
("ESPC", 63.1938888889, 14.5019444444),
("ESPE", 65.8752777778, 20.1497222222),
("ESPJ", 65.8361111111, 21.4713888889),
("ESQO", 59.3863888889, 15.9238888889),
("ESSA", 59.6519444444, 17.9186111111),
("ESSB", 59.3541666667, 17.9413888889),
("ESSD", 60.4219444444, 15.515),
("ESSF", 57.5255555556, 15.8238888889),
("ESSK", 60.5933333333, 16.9513888889),
("ESSL", 58.4061111111, 15.6802777778),
("ESSP", 58.5861111111, 16.2505555556),
("ESST", 60.1575, 12.9911111111),
("ESSU", 59.3508333333, 16.7083333333),
("ESSV", 57.6627777778, 18.3461111111),
("ESUA", 64.5705555556, 19.3141666667),
("ESUD", 64.9608333333, 17.6963888889),
("ESUE", 61.8686111111, 12.6905555556),
("ESUF", 65.1063888889, 20.7605555556),
("ESUK", 67.7647222222, 20.2572222222),
("ESUT", 65.8061111111, 15.0827777778),
("ETAD", 49.9725, 6.6925),
("ETAR", 49.4375, 7.60138888889),
("ETEJ", 49.9202777778, 10.9141666667),
("ETEK", 49.65, 7.3),
("ETEU", 49.6480555556, 9.96638888889),
("ETHB", 52.2783333333, 9.08194444444),
("ETHC", 52.5911111111, 10.0219444444),
("ETHE", 52.2911111111, 7.38694444444),
("ETHF", 51.1144444444, 9.28583333333),
("ETHI", 53.9944444444, 9.57833333333),
("ETHL", 48.2202777778, 9.91),
("ETHM", 50.3658333333, 7.315),
("ETHN", 49.3916666667, 9.95805555556),
("ETHR", 49.2175, 11.1005555556),
("ETHS", 52.9191666667, 10.1836111111),
("ETHT", 51.7677777778, 14.2919444444),
("ETIC", 49.6994444444, 11.9411111111),
("ETID", 50.1691666667, 8.96138888889),
("ETIE", 49.3933333333, 8.65194444444),
("ETIH", 49.2180555556, 11.8361111111),
("ETIN", 49.7430555556, 10.2005555556),
("ETME", 54.6247222222, 9.34138888889),
("ETMN", 53.7675, 8.65833333333),
("ETND", 52.5852777778, 8.34055555556),
("ETNG", 50.9608333333, 6.0425),
("ETNH", 54.3119444444, 9.53805555556),
("ETNJ", 53.5333333333, 7.88861111111),
("ETNL", 53.9180555556, 12.2791666667),
("ETNN", 50.8311111111, 6.65805555556),
("ETNP", 52.3386111111, 7.54111111111),
("ETNR", 51.6636111111, 14.6336111111),
("ETNS", 54.4591666667, 9.51611111111),
("ETNT", 53.5477777778, 7.66722222222),
("ETNU", 53.6019444444, 13.3058333333),
("ETNW", 52.4572222222, 9.42694444444),
("ETOI", 49.6336111111, 11.7672222222),
("ETOR", 49.5633333333, 8.46333333333),
("ETOU", 50.0497222222, 8.32527777778),
("ETSA", 48.0705555556, 10.9058333333),
("ETSB", 50.1736111111, 7.06333333333),
("ETSE", 48.3222222222, 11.9486111111),
("ETSF", 48.2055555556, 11.2669444444),
("ETSH", 51.7677777778, 13.1675),
("ETSI", 48.7155555556, 11.5338888889),
("ETSL", 48.1861111111, 10.8622222222),
("ETSM", 47.9886111111, 10.2394444444),
("ETSN", 48.7108333333, 11.2111111111),
("ETUL", 51.6016666667, 6.1425),
("ETUO", 51.9227777778, 8.30611111111),
("ETUR", 51.1997222222, 6.13194444444),
("FAAB", -28.575, 16.5333333333),
("FAAG", -29.2816666667, 18.8136111111),
("FAAP", -25.9436111111, 29.81),
("FABB", -26.2386111111, 28.3016666667),
("FABE", -32.8969444444, 27.2788888889),
("FABL", -29.0925, 26.3022222222),
("FABM", -28.2483333333, 28.3361111111),
("FABO", -27.3666666667, 26.6291666667),
("FACR", -26.3694444444, 27.35),
("FACT", -33.9647222222, 18.6016666667),
("FACV", -31.5002777778, 19.7258333333),
("FADA", -30.6366666667, 23.92),
("FADB", -24.805, 26.8316666667),
("FADD", -28.1825, 30.2244444444),
("FADN", -29.97, 30.9502777778),
("FAEL", -33.0355555556, 27.8258333333),
("FAEO", -26.4952777778, 29.98),
("FAER", -23.7261111111, 27.6875),
("FAFB", -28.8230555556, 27.9088888889),
("FAGC", -25.9861111111, 28.14),
("FAGG", -34.0052777778, 22.3788888889),
("FAGI", -23.2836111111, 30.6497222222),
("FAGM", -26.2425, 28.1511111111),
("FAGT", -33.2847222222, 26.4980555556),
("FAGY", -29.1219444444, 30.5866666667),
("FAHA", -28.0786111111, 26.8611111111),
("FAHE", -25.9794444444, 29.6186111111),
("FAHG", -26.5061111111, 28.3938888889),
("FAHO", -27.2783333333, 27.9958333333),
("FAHR", -28.235, 29.1061111111),
("FAHS", -24.3686111111, 31.0486111111),
("FAHV", -30.5619444444, 25.5280555556),
("FAJS", -26.1391666667, 28.2458333333),
("FAKD", -26.8708333333, 26.7177777778),
("FAKG", -26.0933333333, 29.4547222222),
("FAKL", -26.2516666667, 29.1944444444),
("FAKM", -28.8027777778, 24.765),
("FAKP", -25.4405555556, 31.9297222222),
("FAKR", -26.0808333333, 27.7255555556),
("FAKS", -27.6605555556, 27.3155555556),
("FAKU", -27.4566666667, 23.4113888889),
("FAKZ", -29.6883333333, 17.0938888889),
("FALA", -25.9383333333, 27.9261111111),
("FALB", -29.1811111111, 27.4530555556),
("FALI", -26.1755555556, 26.1844444444),
("FALT", -23.1597222222, 29.6963888889),
("FALW", -32.9688888889, 18.1602777778),
("FALY", -28.5816666667, 29.7497222222),
("FAMB", -25.6847222222, 29.44),
("FAMG", -30.8572222222, 30.3427777778),
("FAMI", -24.9888888889, 29.2830555556),
("FAMJ", -27.0791666667, 29.7783333333),
("FAMK", -25.7527777778, 25.6119444444),
("FAMM", -25.7983333333, 25.5477777778),
("FAMN", -25.4733333333, 31.5655555556),
("FAMS", -22.3558333333, 29.9863888889),
("FAMU", -27.6258333333, 32.0441666667),
("FANC", -27.7705555556, 29.9766666667),
("FANS", -25.5005555556, 30.9133333333),
("FANY", -24.6858333333, 28.4347222222),
("FAOB", -34.5552777778, 20.2502777778),
("FAOH", -33.6069444444, 22.1888888889),
("FAPB", -23.8452777778, 29.4586111111),
("FAPE", -33.9847222222, 25.6172222222),
("FAPG", -34.0902777778, 23.3277777778),
("FAPH", -23.9369444444, 31.1552777778),
("FAPI", -23.9258333333, 29.4841666667),
("FAPJ", -31.6058333333, 29.5197222222),
("FAPM", -29.6488888889, 30.3986111111),
("FAPN", -25.3336111111, 27.1733333333),
("FAPP", -24.2302777778, 28.9836111111),
("FAPQ", -25.8469444444, 23.5377777778),
("FAPS", -26.6708333333, 27.0816666667),
("FAPY", -26.8891666667, 27.5033333333),
("FAQT", -31.92, 26.8819444444),
("FARB", -28.7408333333, 32.0919444444),
("FARG", -25.6441666667, 27.2711111111),
("FARS", -33.8119444444, 19.9027777778),
("FASB", -29.6891666667, 17.9394444444),
("FASC", -26.5238888889, 29.17),
("FASD", -32.9638888889, 17.9691666667),
("FASI", -26.2483333333, 28.3975),
("FASK", -25.8094444444, 28.1644444444),
("FASS", -27.6483333333, 22.9991666667),
("FASU", -25.9613888889, 29.2086111111),
("FASZ", -24.9608333333, 31.5886111111),
("FATF", -28.26, 22.9930555556),
("FATH", -23.0786111111, 30.3833333333),
("FATN", -29.3191666667, 26.8233333333),
("FATP", -29.0327777778, 26.1575),
("FATT", -26.7763888889, 29.3386111111),
("FATZ", -23.8241666667, 30.3291666667),
("FAUL", -28.3205555556, 31.4163888889),
("FAUP", -28.4008333333, 21.2605555556),
("FAUT", -31.5477777778, 28.6741666667),
("FAVB", -26.9822222222, 24.7286111111),
("FAVG", -29.7705555556, 31.0583333333),
("FAVP", -26.6922222222, 27.7777777778),
("FAVR", -31.6408333333, 18.5447222222),
("FAVV", -26.5661111111, 27.9605555556),
("FAVY", -27.7866666667, 30.7952777778),
("FAWB", -25.6536111111, 28.2241666667),
("FAWI", -25.8322222222, 29.1919444444),
("FAWK", -25.83, 28.2225),
("FAWM", -27.9977777778, 26.6694444444),
("FAWS", -27.8472222222, 26.3497222222),
("FAYP", -33.9, 18.4980555556),
("FAZR", -25.5988888889, 26.0422222222),
("FBFT", -21.1594444444, 27.4744444444),
("FBJW", -24.6022222222, 24.6908333333),
("FBKE", -17.8327777778, 25.1622222222),
("FBMN", -19.9725, 23.4308333333),
("FBOR", -21.2663888889, 25.3202777778),
("FBSK", -24.555, 25.9180555556),
("FBSP", -22.0583333333, 27.8286111111),
("FCBB", -4.25166666667, 15.2527777778),
("FCOI", 1.58944444444, 18.0469444444),
("FCOM", -0.0213888888889, 15.5752777778),
("FCOO", -0.531111111111, 15.95),
("FCOU", 1.61583333333, 16.0377777778),
("FCPA", -3.48333333333, 12.6166666667),
("FCPD", -4.20777777778, 12.6608333333),
("FCPP", -4.81333333333, 11.8858333333),
("FDMS", -26.5288888889, 31.3075),
("FEFF", 4.39833333333, 18.5186111111),
("FEFG", 4.78444444444, 22.7813888889),
("FEFI", 10.2361111111, 22.7172222222),
("FEFM", 5.84694444444, 20.6494444444),
("FEFN", 8.42666666667, 20.635),
("FEFO", 5.95805555556, 15.6375),
("FEFR", 6.52805555556, 21.9888888889),
("FEFT", 4.22138888889, 15.7861111111),
("FEFY", 45.1541666667, -89.1108333333),
("FGBT", 1.90527777778, 9.80555555556),
("FGSL", 3.75527777778, 8.70861111111),
("FHAW", -7.96944444444, -14.3936111111),
("FIMP", -20.43, 57.6833333333),
("FIMR", -19.7577777778, 63.3619444444),
("FJDG", -7.31305555556, 72.4108333333),
("FKKC", 4.08916666667, 9.36027777778),
("FKKD", 4.00583333333, 9.71944444444),
("FKKI", 4.47305555556, 14.3636111111),
("FKKL", 10.4513888889, 14.2572222222),
("FKKM", 5.63666666667, 10.7505555556),
("FKKN", 7.35694444444, 13.5591666667),
("FKKR", 9.33583333333, 13.37),
("FKKU", 5.53694444444, 10.3541666667),
("FKKV", 6.03916666667, 10.1225),
("FKKY", 3.83527777778, 11.5236111111),
("FLKE", -12.5727777778, 27.8938888889),
("FLKL", -14.9975, 22.6475),
("FLLI", -17.8216666667, 25.8225),
("FLLS", -15.3305555556, 28.4525),
("FLMA", -11.1380555556, 28.875),
("FLMF", -13.2586111111, 31.9363888889),
("FLMG", -15.2544444444, 23.1622222222),
("FLML", -12.5647222222, 28.2986111111),
("FLND", -12.9980555556, 28.6647222222),
("FLSO", -12.9002777778, 28.1497222222),
("FLZB", -13.5386111111, 23.1097222222),
("FMCH", -11.5336111111, 43.2716666667),
("FMCI", -12.2980555556, 43.7663888889),
("FMCN", -11.7105555556, 43.2436111111),
("FMCV", -12.1316666667, 44.4302777778),
("FMCZ", -12.8047222222, 45.2811111111),
("FMEE", -20.8869444444, 55.5102777778),
("FMEP", -21.3208333333, 55.4247222222),
("FMMI", -18.7966666667, 47.4786111111),
("FMMN", -19.5627777778, 45.4508333333),
("FMMS", -17.0938888889, 49.8158333333),
("FMMT", -18.1094444444, 49.3925),
("FMMV", -20.2847222222, 44.3175),
("FMNA", -12.3491666667, 49.2916666667),
("FMNC", -16.1638888889, 49.7736111111),
("FMND", -14.6516666667, 49.6205555556),
("FMNE", -13.1883333333, 48.9877777778),
("FMNH", -14.9991666667, 50.32),
("FMNL", -14.6294444444, 47.7636111111),
("FMNM", -15.6672222222, 46.3516666667),
("FMNN", -13.3122222222, 48.3138888889),
("FMNQ", -16.7419444444, 44.4813888889),
("FMNR", -15.4366666667, 49.6883333333),
("FMNS", -14.2786111111, 50.1747222222),
("FMNV", -13.3758333333, 50.0027777778),
("FMNW", -14.8986111111, 47.9938888889),
("FMNZ", -13.4847222222, 48.6325),
("FMSD", -25.0380555556, 46.9561111111),
("FMSF", -21.4413888889, 47.1116666667),
("FMSG", -22.8052777778, 47.8205555556),
("FMSK", -22.1197222222, 48.0216666667),
("FMSM", -21.2016666667, 48.3580555556),
("FMSR", -21.7536111111, 43.3752777778),
("FMST", -23.3833333333, 43.7283333333),
("FNBC", -6.26972222222, 14.2469444444),
("FNBG", -12.6088888889, 13.4036111111),
("FNCA", -5.59694444444, 12.1883333333),
("FNCH", -7.3575, 20.8036111111),
("FNCX", -8.37361111111, 18.9236111111),
("FNCZ", -11.8936111111, 22.9161111111),
("FNGI", -17.0447222222, 15.6869444444),
("FNHU", -12.8086111111, 15.7602777778),
("FNKU", -12.4044444444, 16.9472222222),
("FNLB", -12.3711111111, 13.5363888889),
("FNLU", -8.85833333333, 13.2311111111),
("FNMA", -9.525, 16.3122222222),
("FNME", -14.6575, 17.7197222222),
("FNMO", -15.2611111111, 12.1466666667),
("FNNG", -7.75444444444, 15.2875),
("FNPA", -10.7219444444, 13.7652777778),
("FNSA", -9.68888888889, 20.4316666667),
("FNSO", -6.14083333333, 12.3716666667),
("FNTO", -7.14722222222, 14.2480555556),
("FNUE", -11.7680555556, 19.8975),
("FNUG", -7.60305555556, 15.0277777778),
("FNXA", -16.7552777778, 14.9652777778),
("FOGO", 1.54111111111, 11.5808333333),
("FOGQ", -0.665277777778, 13.6730555556),
("FOGR", -0.704166666667, 10.2455555556),
("FOOB", 2.07555555556, 11.4930555556),
("FOOD", -1.5375, 13.2691666667),
("FOOG", -0.711666666667, 8.75416666667),
("FOOH", -1.57472222222, 9.26277777778),
("FOOK", 0.579166666667, 12.8908333333),
("FOOL", 0.458333333333, 9.41222222222),
("FOOM", 0.775555555556, 11.5525),
("FOON", -1.65611111111, 13.4377777778),
("FOOR", -0.826388888889, 12.7466666667),
("FOOT", -2.88888888889, 10.9194444444),
("FPPR", 1.66277777778, 7.41166666667),
("FPST", 0.378055555556, 6.71194444444),
("FQBR", -19.7963888889, 34.9075),
("FQCB", -14.82, 36.5319444444),
("FQCH", -19.1511111111, 33.4288888889),
("FQES", -15.7341666667, 32.7566666667),
("FQIN", -23.8763888889, 35.4083333333),
("FQLC", -13.2738888889, 35.2661111111),
("FQLU", -15.0330555556, 40.6716666667),
("FQMA", -25.9208333333, 32.5725),
("FQMD", -11.6727777778, 39.5630555556),
("FQMP", -11.3616666667, 40.3547222222),
("FQMR", -13.225, 37.5519444444),
("FQNC", -14.4880555556, 40.7122222222),
("FQNP", -15.1055555556, 39.2816666667),
("FQPB", -12.9866666667, 40.5222222222),
("FQQL", -17.8555555556, 36.8691666667),
("FQSG", -15.6025, 32.7730555556),
("FQTT", -16.1047222222, 33.64),
("FQUG", -14.7044444444, 34.3522222222),
("FQVL", -22.0183333333, 35.3130555556),
("FSAL", -7.00472222222, 52.7261111111),
("FSDR", -5.69583333333, 53.6544444444),
("FSFA", -10.1094444444, 51.1761111111),
("FSIA", -4.67416666667, 55.5216666667),
("FSPP", -4.31916666667, 55.6913888889),
("FSSC", -7.16416666667, 56.2638888889),
("FTTA", 9.15111111111, 18.3794444444),
("FTTC", 13.8469444444, 20.8441666667),
("FTTD", 8.62027777778, 16.0683333333),
("FTTJ", 12.1336111111, 15.0338888889),
("FTTP", 9.37916666667, 14.9258333333),
("FTTY", 17.9169444444, 19.1108333333),
("FVBU", -20.0172222222, 28.6177777778),
("FVCP", -17.7513888889, 30.9244444444),
("FVCZ", -21.0080555556, 31.5783333333),
("FVFA", -18.0961111111, 25.8391666667),
("FVGR", -18.9775, 32.4505555556),
("FVHA", -17.9316666667, 31.0927777778),
("FVKB", -16.5197222222, 28.8847222222),
("FVMT", -17.4316666667, 32.1844444444),
("FVMU", -18.9975, 32.6272222222),
("FVMV", -20.0552777778, 30.8588888889),
("FVSH", -20.2894444444, 30.0883333333),
("FVTL", -19.4366666667, 29.8616666667),
("FVWN", -18.6297222222, 27.0208333333),
("FVZC", -19.0286111111, 29.7219444444),
("FWCL", -15.6788888889, 34.9738888889),
("FWKA", -9.95333333333, 33.8927777778),
("FWKG", -13.0144444444, 33.4683333333),
("FWMG", 30.8377777778, -85.1816666667),
("FWUU", -11.4447222222, 34.0116666667),
("FXMM", -29.4622222222, 27.5525),
("FXMU", -29.3038888889, 27.5033333333),
("FZAA", -4.38555555556, 15.4444444444),
("FZAB", -4.32472222222, 15.3283333333),
("FZAG", -5.93083333333, 12.3516666667),
("FZAI", -5.91805555556, 12.4475),
("FZAM", -5.79944444444, 13.4408333333),
("FZBO", -3.31111111111, 17.3816666667),
("FZCA", -5.03555555556, 18.7855555556),
("FZEA", 0.0225, 18.2886111111),
("FZFD", 4.25305555556, 20.9752777778),
("FZFK", 3.23527777778, 19.7711111111),
("FZFP", 4.1575, 21.6508333333),
("FZGA", 2.17055555556, 21.4966666667),
("FZIA", 0.5175, 25.155),
("FZJH", 2.8275, 27.5880555556),
("FZKA", 1.56555555556, 30.2208333333),
("FZKJ", 2.81777777778, 24.7938888889),
("FZMA", -2.30888888889, 28.8086111111),
("FZNA", -1.67055555556, 29.2383333333),
("FZOA", -2.91916666667, 25.9152777778),
("FZQA", -11.5911111111, 27.5308333333),
("FZQM", -10.7658333333, 25.5055555556),
("FZRF", -5.87555555556, 29.25),
("FZSA", -8.64194444444, 25.2527777778),
("FZTL", -9.46944444444, 25.7588888889),
("FZUA", -5.9, 22.4691666667),
("FZWA", -6.12111111111, 23.5688888889),
("GABS", 12.5333333333, -7.94972222222),
("GAGO", 16.2483333333, -0.00527777777778),
("GAKY", 14.4311111111, -11.4394444444),
("GAMB", 14.5127777778, -4.07944444444),
("GANR", 15.2386111111, -9.57638888889),
("GATB", 16.7302777778, -3.0075),
("GATS", 20.2461111111, 0.980833333333),
("GBYD", 13.3377777778, -16.6519444444),
("GCFV", 28.4525, -13.8636111111),
("GCHI", 27.8147222222, -17.8869444444),
("GCLA", 28.6263888889, -17.7555555556),
("GCLP", 27.9316666667, -15.3863888889),
("GCRR", 28.9452777778, -13.605),
("GCTS", 28.0444444444, -16.5722222222),
("GCXO", 28.4825, -16.3413888889),
("GEML", 35.2797222222, -2.95611111111),
("GFHA", 8.39444444444, -13.1283333333),
("GFLL", 8.61638888889, -13.1952777778),
("GGCF", 11.2880555556, -15.1805555556),
("GGOV", 11.8947222222, -15.6536111111),
("GLMR", 6.28888888889, -10.7586111111),
("GLRB", 6.23361111111, -10.3622222222),
("GMAA", 30.3811111111, -9.54611111111),
("GMAT", 28.4480555556, -11.1611111111),
("GMFF", 33.9272222222, -4.97777777778),
("GMFI", 33.5052777778, -5.15277777778),
("GMFK", 31.9488888889, -4.40055555556),
("GMFM", 33.8788888889, -5.515),
("GMFN", 35.1533333333, -2.92),
("GMFO", 34.7869444444, -1.92388888889),
("GMMC", 33.5533333333, -7.66138888889),
("GMME", 34.0513888889, -6.75138888889),
("GMMF", 29.3688888889, -10.18),
("GMMI", 31.4038888889, -9.68472222222),
("GMMN", 33.3677777778, -7.58777777778),
("GMMX", 31.6066666667, -8.03611111111),
("GMMY", 34.2988888889, -6.59583333333),
("GMMZ", 30.9388888889, -6.90916666667),
("GMTA", 35.1769444444, -3.83944444444),
("GMTN", 35.5941666667, -5.32),
("GMTT", 35.7266666667, -5.91666666667),
("GOGG", 12.5555555556, -16.2816666667),
("GOGK", 12.88, -14.9552777778),
("GOGS", 12.41, -16.7461111111),
("GOOK", 14.1466666667, -16.0511111111),
("GOOY", 14.7394444444, -17.49),
("GOSS", 16.0497222222, -16.4611111111),
("GOTB", 14.8472222222, -12.4680555556),
("GOTK", 12.5722222222, -12.2202777778),
("GOTT", 13.7366666667, -13.6530555556),
("GQNA", 16.7111111111, -9.63777777778),
("GQND", 18.5702777778, -11.4230555556),
("GQNF", 16.5897222222, -11.4061111111),
("GQNI", 16.6219444444, -7.31444444444),
("GQNK", 16.1594444444, -13.5075),
("GQNN", 18.0977777778, -15.9477777778),
("GQNS", 15.1794444444, -12.2072222222),
("GQPA", 20.5066666667, -13.0430555556),
("GQPP", 20.9283333333, -17.0311111111),
("GQPT", 25.2366666667, -11.5886111111),
("GUFA", 10.3505555556, -13.5691666667),
("GUFH", 10.0355555556, -10.7697222222),
("GULB", 11.3261111111, -12.2869444444),
("GUMA", 8.48111111111, -9.52583333333),
("GUNZ", 7.80583333333, -8.70166666667),
("GVAC", 16.7413888889, -22.9494444444),
("GVBA", 16.1366666667, -22.8888888889),
("GVFM", 14.9244444444, -23.4933333333),
("GVMA", 15.1558333333, -23.2136111111),
("GVSN", 16.5883333333, -24.2844444444),
("GVSV", 16.8338888889, -25.0566666667),
("HAAB", 8.97694444444, 38.8),
("HAAL", 9.00361111111, 38.7255555556),
("HABD", 11.6080555556, 37.3213888889),
("HADR", 9.625, 41.8541666667),
("HAGM", 8.12833333333, 34.5633333333),
("HAGN", 12.5183333333, 37.4322222222),
("HAHM", 8.71555555556, 39.0080555556),
("HAJM", 7.66583333333, 36.8163888889),
("HALL", 11.9747222222, 38.9797222222),
("HAMK", 13.4672222222, 39.5333333333),
("HBBA", -3.32388888889, 29.3183333333),
("HCMB", 3.09861111111, 43.6241666667),
("HCMH", 9.51805555556, 44.0886111111),
("HCMI", 10.3891666667, 44.9408333333),
("HCMK", -0.377222222222, 42.4591666667),
("HCMM", 2.01361111111, 45.3047222222),
("HEAR", 31.0733333333, 33.8358333333),
("HEAT", 27.0463888889, 31.0119444444),
("HEAX", 31.1838888889, 29.9488888889),
("HEBL", 22.3758333333, 31.6116666667),
("HECA", 30.1219444444, 31.4055555556),
("HECW", 30.1161111111, 30.9152777778),
("HEEM", 30.0744444444, 31.1919444444),
("HEGN", 27.1838888889, 33.7983333333),
("HEGR", 31.0688888889, 34.1291666667),
("HELX", 25.6708333333, 32.7063888889),
("HEMM", 31.3252777778, 27.2216666667),
("HEPS", 31.2794444444, 32.24),
("HESC", 28.6852777778, 34.0625),
("HESN", 23.9641666667, 32.8197222222),
("HETR", 28.2088888889, 33.6452777778),
("HKEL", 0.404166666667, 35.2380555556),
("HKGA", -0.463333333333, 39.6480555556),
("HKIS", 0.339166666667, 37.5908333333),
("HKKG", 0.271111111111, 34.7872222222),
("HKKI", -0.0861111111111, 34.7288888889),
("HKKT", 0.971944444444, 34.9583333333),
("HKLK", 4.20416666667, 34.3480555556),
("HKLO", 3.12194444444, 35.6086111111),
("HKLU", -2.25222222222, 40.9130555556),
("HKLY", 2.76305555556, 36.7183333333),
("HKMA", 3.93361111111, 41.8441666667),
("HKMB", 2.345, 37.9991666667),
("HKMK", 0.230277777778, 38.1702777778),
("HKML", -3.22916666667, 40.1016666667),
("HKMO", -4.03472222222, 39.5941666667),
("HKMY", 3.46972222222, 39.1013888889),
("HKNI", -0.368888888889, 36.98),
("HKNV", -0.787777777778, 36.4333333333),
("HKNW", -1.32166666667, 36.8147222222),
("HKNY", -0.0608333333333, 37.0386111111),
("HKRE", -1.27722222222, 36.8622222222),
("HKWJ", 1.73305555556, 40.0913888889),
("HLFL", 28.7952777778, 22.0808333333),
("HLGL", 28.6383333333, 21.4377777778),
("HLGT", 25.1455555556, 10.1425),
("HLKF", 24.1786111111, 23.3138888889),
("HLLB", 32.0966666667, 20.2694444444),
("HLLQ", 32.7886111111, 21.9641666667),
("HLLS", 26.9869444444, 14.4725),
("HLLT", 32.6633333333, 13.1588888889),
("HLMB", 30.3780555556, 19.5763888889),
("HLNF", 30.5, 18.5269444444),
("HLON", 29.11, 15.9655555556),
("HLRA", 29.4697222222, 17.9311111111),
("HLTD", 30.1516666667, 9.71527777778),
("HLZA", 28.59, 17.2941666667),
("HRYG", -1.67694444444, 29.2586111111),
("HRYR", -1.96861111111, 30.1394444444),
("HRZA", -2.46222222222, 28.9077777778),
("HSDN", 19.1536111111, 30.43),
("HSDZ", 11.7858333333, 34.3363888889),
("HSFS", 13.615, 25.3247222222),
("HSKA", 15.3858333333, 36.3280555556),
("HSNL", 12.0536111111, 24.9552777778),
("HSOB", 13.1530555556, 30.2325),
("HSSJ", 4.87194444444, 31.6011111111),
("HSSM", 9.55861111111, 31.6525),
("HSSP", 19.5763888889, 37.2158333333),
("HSSS", 15.5894444444, 32.5530555556),
("HSWW", 7.72555555556, 27.9794444444),
("HTAR", -3.36777777778, 36.6333333333),
("HTDA", -6.87805555556, 39.2025),
("HTDO", -6.17027777778, 35.7525),
("HTIR", -7.66861111111, 35.7519444444),
("HTKJ", -3.42916666667, 37.0744444444),
("HTLM", -3.37611111111, 35.8180555556),
("HTMD", -3.51416666667, 33.6188888889),
("HTMS", -3.36277777778, 37.3233333333),
("HTMT", -10.3388888889, 40.1816666667),
("HTMW", -2.44444444444, 32.9325),
("HTNG", -6.71722222222, 38.1536111111),
("HTPE", -5.25722222222, 39.8113888889),
("HTTG", -5.09222222222, 39.0711111111),
("HTZA", -6.22194444444, 39.2247222222),
("HUEN", 0.0422222222222, 32.4433333333),
("HUGU", 2.80555555556, 32.2716666667),
("HUSO", 1.7275, 33.6227777778),
("KABI", 32.4111111111, -99.6816666667),
("KACK", 41.2527777778, -70.06),
("KACT", 31.6111111111, -97.2302777778),
("KACY", 39.4575, -74.5769444444),
("KADM", 34.3030555556, -97.0194444444),
("KADW", 38.8105555556, -76.8669444444),
("KAEX", 31.3272222222, -92.5483333333),
("KAGS", 33.3697222222, -81.9644444444),
("KAKR", 41.0375, -81.4666666667),
("KALB", 42.7480555556, -73.8027777778),
("KALI", 27.7408333333, -98.0269444444),
("KAMA", 35.2191666667, -101.705833333),
("KANB", 33.5880555556, -85.8580555556),
("KAND", 34.495, -82.7091666667),
("KAOO", 40.2963888889, -78.32),
("KAPG", 39.4661111111, -76.1694444444),
("KARA", 30.0375, 91.8838888889),
("KART", 43.9916666667, -76.0216666667),
("KATL", 33.6402777778, -84.4269444444),
("KAUG", 44.3205555556, -69.7972222222),
("KAUS", 30.1944444444, -97.6697222222),
("KBAB", 39.1358333333, -121.436388889),
("KBAD", 32.5016666667, -93.6625),
("KBCT", 26.3783333333, -80.1075),
("KBDE", 48.7283333333, -94.6122222222),
("KBDL", 41.9388888889, -72.6830555556),
("KBDR", 41.1633333333, -73.1261111111),
("KBED", 42.4697222222, -71.2888888889),
("KBFI", 47.5297222222, -122.301944444),
("KBFL", 35.4333333333, -119.056666667),
("KBFM", 30.6263888889, -88.0677777778),
("KBGR", 44.8072222222, -68.8280555556),
("KBHM", 33.5627777778, -86.7533333333),
("KBIF", 31.8494444444, -106.38),
("KBIX", 30.4108333333, -88.9236111111),
("KBKF", 39.7016666667, -104.751388889),
("KBLI", 48.7925, -122.5375),
("KBLV", 38.545, -89.835),
("KBNA", 36.1244444444, -86.6780555556),
("KBOI", 43.5641666667, -116.222777778),
("KBOS", 42.3641666667, -71.005),
("KBPT", 29.9508333333, -94.0205555556),
("KBRO", 25.9066666667, -97.4258333333),
("KBTR", 30.5330555556, -91.1494444444),
("KBTV", 44.4716666667, -73.1530555556),
("KBUF", 42.9402777778, -78.7319444444),
("KBUR", 34.2005555556, -118.358611111),
("KBWI", 39.1752777778, -76.6683333333),
("KBYH", 35.9641666667, -89.9433333333),
("KBYS", 35.2802777778, -116.63),
("KCAE", 33.9386111111, -81.1194444444),
("KCAR", 46.8713888889, -68.0177777778),
("KCBM", 33.6438888889, -88.4436111111),
("KCDC", 37.7008333333, -113.098611111),
("KCDS", 34.4336111111, -100.288055556),
("KCEF", 42.1980555556, -72.5341666667),
("KCEW", 30.7786111111, -86.5219444444),
("KCFD", 30.7155555556, -96.3311111111),
("KCHA", 35.0352777778, -85.2036111111),
("KCHS", 32.8986111111, -80.0402777778),
("KCIC", 39.7952777778, -121.858333333),
("KCLE", 41.4116666667, -81.8497222222),
("KCLL", 30.5883333333, -96.3636111111),
("KCLT", 35.2138888889, -80.9430555556),
("KCMH", 39.9977777778, -82.8916666667),
("KCNM", 32.3372222222, -104.263055556),
("KCNW", 31.6377777778, -97.0738888889),
("KCOF", 28.2347222222, -80.61),
("KCOS", 38.8055555556, -104.7),
("KCOT", 28.4580555556, -99.22),
("KCOU", 38.8180555556, -92.2194444444),
("KCPR", 42.9077777778, -106.464166667),
("KCRP", 27.7702777778, -97.5011111111),
("KCTB", 48.6083333333, -112.376111111),
("KCVG", 39.0461111111, -84.6619444444),
("KCVS", 34.3825, -103.321944444),
("KCXL", 32.6694444444, -115.513055556),
("KCXO", 30.3516666667, -95.4144444444),
("KCYS", 41.1555555556, -104.811666667),
("KDAL", 32.8469444444, -96.8516666667),
("KDAY", 39.9022222222, -84.2191666667),
("KDCA", 38.8519444444, -77.0375),
("KDEN", 39.8583333333, -104.666944444),
("KDET", 42.4091666667, -83.0097222222),
("KDFW", 32.8963888889, -97.0375),
("KDHN", 31.3211111111, -85.4494444444),
("KDHT", 36.0225, -102.547222222),
("KDLF", 29.3594444444, -100.777777778),
("KDLH", 46.8419444444, -92.1936111111),
("KDMA", 32.1663888889, -110.883055556),
("KDOV", 39.13, -75.4663888889),
("KDPA", 41.9077777778, -88.2486111111),
("KDRI", 30.8316666667, -93.3397222222),
("KDRO", 37.1513888889, -107.753611111),
("KDRT", 29.3727777778, -100.925833333),
("KDSM", 41.5338888889, -93.6625),
("KDTW", 42.2122222222, -83.3533333333),
("KDUG", 31.4688888889, -109.603611111),
("KDYS", 32.4205555556, -99.8544444444),
("KECG", 36.2605555556, -76.1744444444),
("KEDW", 34.9052777778, -117.883611111),
("KEFD", 29.6072222222, -95.1586111111),
("KEGP", 28.7, -100.479444444),
("KEKN", 38.8894444444, -79.8569444444),
("KELD", 33.2208333333, -92.8130555556),
("KELP", 31.8066666667, -106.377777778),
("KEND", 36.3397222222, -97.9161111111),
("KENV", 40.7186111111, -114.030833333),
("KESF", 31.3947222222, -92.2955555556),
("KEWN", 35.0727777778, -77.0427777778),
("KEWR", 40.6922222222, -74.1686111111),
("KEYW", 24.5561111111, -81.7594444444),
("KFAF", 37.1325, -76.6086111111),
("KFAT", 36.7761111111, -119.718055556),
("KFCS", 38.6783333333, -104.756388889),
("KFFO", 39.8261111111, -84.0483333333),
("KFHU", 31.5883333333, -110.344166667),
("KFLL", 26.0725, -80.1525),
("KFLO", 34.1852777778, -79.7238888889),
("KFLV", 39.3683333333, -94.9144444444),
("KFME", 39.0852777778, -76.7591666667),
("KFMH", 41.6583333333, -70.5213888889),
("KFMN", 36.7411111111, -108.229722222),
("KFMY", 26.5863888889, -81.8630555556),
("KFOD", 42.5513888889, -94.1925),
("KFOE", 38.9508333333, -95.6636111111),
("KFOK", 40.8436111111, -72.6316666667),
("KFRI", 39.055, -96.7644444444),
("KFSI", 34.6497222222, -98.4019444444),
("KFSM", 35.3363888889, -94.3672222222),
("KFTK", 37.9069444444, -85.9719444444),
("KFTW", 32.8197222222, -97.3622222222),
("KFXE", 26.1972222222, -80.1705555556),
("KFYV", 36.005, -94.17),
("KGAG", 36.2952777778, -99.7763888889),
("KGCK", 37.9275, -100.724166667),
("KGEG", 47.6197222222, -117.533611111),
("KGFK", 47.9491666667, -97.1761111111),
("KGGG", 32.3847222222, -94.7113888889),
("KGLS", 29.2652777778, -94.8602777778),
("KGNT", 35.1652777778, -107.900555556),
("KGNV", 29.69, -82.2716666667),
("KGRB", 44.485, -88.1294444444),
("KGRF", 47.0791666667, -122.580555556),
("KGRK", 31.0672222222, -97.8288888889),
("KGRR", 42.8808333333, -85.5227777778),
("KGSB", 35.3391666667, -77.9605555556),
("KGTB", 44.0555555556, -75.7194444444),
("KGTF", 47.4819444444, -111.370555556),
("KGUS", 40.6480555556, -86.1519444444),
("KGVT", 33.0677777778, -96.0652777778),
("KGVW", 38.8433333333, -94.5605555556),
("KGWO", 33.4941666667, -90.0844444444),
("KHBR", 34.9911111111, -99.0511111111),
("KHFD", 41.7361111111, -72.65),
("KHHR", 33.9227777778, -118.335),
("KHIB", 47.3863888889, -92.8388888889),
("KHIF", 41.1238888889, -111.972777778),
("KHKY", 35.7411111111, -81.3894444444),
("KHLN", 46.6066666667, -111.9825),
("KHLR", 31.1386111111, -97.7144444444),
("KHMN", 32.8525, -106.106388889),
("KHOB", 32.6875, -103.216944444),
("KHON", 44.385, -98.2283333333),
("KHOP", 36.6683333333, -87.4961111111),
("KHOU", 29.6452777778, -95.2788888889),
("KHPN", 41.0669444444, -73.7075),
("KHRL", 26.2283333333, -97.6541666667),
("KHRO", 36.2613888889, -93.1547222222),
("KHRT", 30.4277777778, -86.6891666667),
("KHST", 25.4883333333, -80.3836111111),
("KHTL", 44.3597222222, -84.6711111111),
("KHUA", 34.6786111111, -86.6847222222),
("KHUF", 39.4513888889, -87.3075),
("KHUL", 46.1230555556, -67.7919444444),
("KHVR", 48.5427777778, -109.762222222),
("KHWO", 26.0013888889, -80.2402777778),
("KIAB", 37.6227777778, -97.2672222222),
("KIAD", 38.9444444444, -77.4555555556),
("KIAG", 43.1072222222, -78.9461111111),
("KIAH", 29.9802777778, -95.3397222222),
("KICT", 37.6497222222, -97.4330555556),
("KIKK", 41.0713888889, -87.8461111111),
("KIKR", 35.04, -106.609166667),
("KILG", 39.6786111111, -75.6063888889),
("KILM", 34.2705555556, -77.9025),
("KIND", 39.7172222222, -86.2941666667),
("KINK", 31.7794444444, -103.201111111),
("KINL", 48.5661111111, -93.4030555556),
("KINS", 36.5869444444, -115.673333333),
("KINT", 36.1336111111, -80.2219444444),
("KIPL", 32.8341666667, -115.578611111),
("KIPT", 41.2419444444, -76.9211111111),
("KISN", 48.1777777778, -103.642222222),
("KISP", 40.795, -73.1),
("KJAN", 32.3111111111, -90.0758333333),
("KJAX", 30.4938888889, -81.6877777778),
("KJBR", 35.8316666667, -90.6461111111),
("KJFK", 40.6397222222, -73.7788888889),
("KLAN", 42.7786111111, -84.5872222222),
("KLAS", 36.0802777778, -115.152222222),
("KLAX", 33.9425, -118.408055556),
("KLBB", 33.6636111111, -101.822777778),
("KLCH", 30.1261111111, -93.2233333333),
("KLCK", 39.8136111111, -82.9277777778),
("KLFI", 37.0827777778, -76.3602777778),
("KLFK", 31.2338888889, -94.75),
("KLFT", 30.2052777778, -91.9875),
("KLGA", 40.7772222222, -73.8725),
("KLGB", 33.8175, -118.151388889),
("KLHW", 31.8888888889, -81.5622222222),
("KLIT", 34.7294444444, -92.2241666667),
("KLNA", 26.5927777778, -80.085),
("KLNK", 40.8508333333, -96.7591666667),
("KLOU", 38.2277777778, -85.6636111111),
("KLRD", 27.5436111111, -99.4613888889),
("KLRF", 34.9180555556, -92.1463888889),
("KLSF", 32.3372222222, -84.9911111111),
("KLSV", 36.2361111111, -115.034166667),
("KLTS", 34.6669444444, -99.2666666667),
("KLUF", 33.535, -112.383055556),
("KLUK", 39.1033333333, -84.4186111111),
("KMAF", 31.9425, -102.201666667),
("KMCC", 38.6675, -121.400555556),
("KMCF", 27.8491666667, -82.5211111111),
("KMCI", 39.2975, -94.7138888889),
("KMCN", 32.6927777778, -83.6491666667),
("KMCO", 28.4288888889, -81.3158333333),
("KMDT", 40.1933333333, -76.7633333333),
("KMDW", 41.7858333333, -87.7522222222),
("KMEM", 35.0422222222, -89.9766666667),
("KMER", 37.3802777778, -120.568055556),
("KMFE", 26.1758333333, -98.2386111111),
("KMGE", 33.9152777778, -84.5161111111),
("KMHR", 38.5538888889, -121.2975),
("KMIA", 25.7930555556, -80.2905555556),
("KMIB", 48.4155555556, -101.3575),
("KMIV", 39.3677777778, -75.0722222222),
("KMKE", 42.9472222222, -87.8963888889),
("KMKL", 35.5997222222, -88.9155555556),
("KMKO", 35.6575, -95.3613888889),
("KMLB", 28.1025, -80.645),
("KMLC", 34.8822222222, -95.7833333333),
("KMLT", 45.6477777778, -68.6855555556),
("KMLU", 32.5108333333, -92.0375),
("KMMV", 45.1944444444, -123.135833333),
("KMNM", 45.1263888889, -87.6383333333),
("KMOB", 30.6913888889, -88.2427777778),
("KMOD", 37.6255555556, -120.954166667),
("KMOT", 48.2591666667, -101.280277778),
("KMPV", 44.2033333333, -72.5622222222),
("KMQT", 46.5338888889, -87.5616666667),
("KMSN", 43.1397222222, -89.3375),
("KMSP", 44.8802777778, -93.2166666667),
("KMSS", 44.9358333333, -74.8452777778),
("KMSY", 29.9933333333, -90.2577777778),
("KMTC", 42.6127777778, -82.8316666667),
("KMUI", 40.4347222222, -76.5691666667),
("KMUO", 43.0433333333, -115.872222222),
("KMWH", 47.2075, -119.32),
("KMWL", 32.7813888889, -98.06),
("KMXF", 32.3791666667, -86.3625),
("KMYR", 33.6797222222, -78.9283333333),
("KNBC", 32.4772222222, -80.7230555556),
("KNBG", 29.8252777778, -90.035),
("KNCA", 34.7083333333, -77.4394444444),
("KNEL", 40.0333333333, -74.3533333333),
("KNFL", 39.4163888889, -118.700833333),
("KNGU", 36.9375, -76.2891666667),
("KNHK", 38.2858333333, -76.4116666667),
("KNID", 35.6877777778, -117.690555556),
("KNIP", 30.2358333333, -81.6805555556),
("KNJK", 32.8291666667, -115.671666667),
("KNKT", 34.9025, -76.8808333333),
("KNKX", 32.8683333333, -117.1425),
("KNLC", 36.3327777778, -119.951944444),
("KNMM", 32.5519444444, -88.5555555556),
("KNOW", 48.1413888889, -123.413888889),
("KNPA", 30.3525, -87.3186111111),
("KNQA", 35.3566666667, -89.8702777778),
("KNQI", 27.5072222222, -97.8097222222),
("KNQX", 24.5758333333, -81.6888888889),
("KNSE", 30.7241666667, -87.0219444444),
("KNTD", 34.1202777778, -119.120833333),
("KNTK", 33.7061111111, -117.827222222),
("KNTU", 36.8205555556, -76.0333333333),
("KNUQ", 37.415, -122.048055556),
("KNUW", 48.3516666667, -122.655833333),
("KNXP", 34.2961111111, -116.161944444),
("KNXX", 40.1997222222, -75.1480555556),
("KNYG", 38.5016666667, -77.3052777778),
("KNZC", 30.2186111111, -81.8763888889),
("KNZY", 32.6991666667, -117.215277778),
("KOAK", 37.7211111111, -122.220555556),
("KOFF", 41.1183333333, -95.9125),
("KOGS", 44.6819444444, -75.4655555556),
("KOKC", 35.3930555556, -97.6005555556),
("KOLS", 31.4175, -110.847777778),
("KOMA", 41.3025, -95.8936111111),
("KONT", 34.0558333333, -117.601111111),
("KOPF", 25.9069444444, -80.2783333333),
("KORD", 41.9794444444, -87.9044444444),
("KORF", 36.8944444444, -76.2011111111),
("KORL", 28.5452777778, -81.3327777778),
("KOSC", 44.4513888889, -83.3938888889),
("KPAE", 47.9061111111, -122.281388889),
("KPAM", 30.0697222222, -85.5763888889),
("KPBF", 34.1747222222, -91.9344444444),
("KPBG", 44.6508333333, -73.4680555556),
("KPBI", 26.6830555556, -80.0955555556),
("KPDX", 45.5886111111, -122.5975),
("KPHF", 37.1316666667, -76.4927777778),
("KPHL", 39.8719444444, -75.2411111111),
("KPHN", 42.9108333333, -82.5286111111),
("KPHX", 33.4341666667, -112.008055556),
("KPIE", 27.9105555556, -82.6872222222),
("KPIT", 40.4913888889, -80.2327777778),
("KPMB", 48.9425, -97.2408333333),
("KPMD", 34.6291666667, -118.084444444),
("KPNC", 36.7305555556, -97.0997222222),
("KPNE", 40.0819444444, -75.0105555556),
("KPNM", 45.5597222222, -93.6080555556),
("KPNS", 30.4730555556, -87.1872222222),
("KPOB", 35.1708333333, -79.0144444444),
("KPOE", 31.0447222222, -93.1913888889),
("KPQI", 46.6888888889, -68.0447222222),
("KPRC", 34.6544444444, -112.419444444),
("KPSP", 33.8294444444, -116.506666667),
("KPSX", 28.7275, -96.2508333333),
("KPUB", 38.2888888889, -104.496388889),
("KPVD", 41.7238888889, -71.4280555556),
("KPWM", 43.6461111111, -70.3086111111),
("KRAL", 33.9516666667, -117.445),
("KRBM", 34.85, -92.3),
("KRCA", 44.145, -103.103333333),
("KRDR", 47.9608333333, -97.4011111111),
("KRDU", 35.8775, -78.7872222222),
("KRIC", 37.505, -77.3194444444),
("KRIU", 38.4886111111, -121.102222222),
("KRIV", 33.8805555556, -117.259444444),
("KRME", 43.2336111111, -75.4069444444),
("KRND", 29.5294444444, -98.2788888889),
("KRNO", 39.4983333333, -119.768055556),
("KROC", 43.1186111111, -77.6722222222),
("KROW", 33.3013888889, -104.530555556),
("KRSW", 26.5361111111, -81.755),
("KSAC", 38.5125, -121.493333333),
("KSAF", 35.6166666667, -106.088055556),
("KSAN", 32.7333333333, -117.189444444),
("KSAT", 29.5336111111, -98.4697222222),
("KSAV", 32.1275, -81.2019444444),
("KSAW", 46.3536111111, -87.3958333333),
("KSBO", 32.6083333333, -82.3686111111),
("KSBY", 38.3402777778, -75.5102777778),
("KSCK", 37.8941666667, -121.238611111),
("KSEA", 47.4488888889, -122.309166667),
("KSEM", 32.3438888889, -86.9877777778),
("KSFF", 47.6827777778, -117.3225),
("KSFO", 37.6188888889, -122.374722222),
("KSFZ", 41.9205555556, -71.4911111111),
("KSHV", 32.4463888889, -93.8255555556),
("KSJC", 37.3616666667, -121.928888889),
("KSJT", 31.3575, -100.496111111),
("KSKA", 47.615, -117.655555556),
("KSKF", 29.3841666667, -98.5808333333),
("KSKY", 41.4333333333, -82.6522222222),
("KSLC", 40.7883333333, -111.9775),
("KSMF", 38.6952777778, -121.590555556),
("KSNA", 33.6755555556, -117.868055556),
("KSPB", 45.7725, -122.862222222),
("KSPG", 27.765, -82.6269444444),
("KSPS", 33.9886111111, -98.4916666667),
("KSSC", 33.9727777778, -80.4727777778),
("KSTL", 38.7475, -90.3597222222),
("KSUU", 38.2625, -121.927222222),
("KSUX", 42.4025, -96.3841666667),
("KSVN", 32.01, -81.1455555556),
("KSWF", 41.5038888889, -74.1047222222),
("KSYR", 43.1111111111, -76.1061111111),
("KSZL", 38.7302777778, -93.5477777778),
("KTBN", 37.7413888889, -92.1405555556),
("KTCC", 35.1827777778, -103.603055556),
("KTCM", 47.1375, -122.476388889),
("KTCS", 33.2369444444, -107.271666667),
("KTEB", 40.8497222222, -74.0608333333),
("KTIK", 35.4147222222, -97.3863888889),
("KTLH", 30.3963888889, -84.3502777778),
("KTMB", 25.6477777778, -80.4327777778),
("KTNT", 25.8616666667, -80.8969444444),
("KTNX", 37.7944444444, -116.778611111),
("KTPA", 27.9752777778, -82.5330555556),
("KTTN", 40.2766666667, -74.8133333333),
("KTUL", 36.1983333333, -95.8880555556),
("KTUS", 32.1161111111, -110.941388889),
("KTXK", 33.4536111111, -93.9908333333),
("KTYR", 32.3538888889, -95.4022222222),
("KTYS", 35.8122222222, -83.9927777778),
("KUGN", 42.4219444444, -87.8677777778),
("KVAD", 30.9677777778, -83.1927777778),
("KVBG", 34.7294444444, -120.576666667),
("KVCV", 34.5930555556, -117.379444444),
("KVPS", 30.4830555556, -86.5252777778),
("KVRB", 27.6555555556, -80.4177777778),
("KWAL", 37.94, -75.4663888889),
("KWRB", 32.64, -83.5916666667),
("KWRI", 40.0155555556, -74.5936111111),
("KWSD", 32.3413888889, -106.4025),
("KWWD", 39.0083333333, -74.9080555556),
("KYIP", 42.2377777778, -83.5302777778),
("KYNG", 41.2605555556, -80.6788888889),
("KYUM", 32.6563888889, -114.605833333),
("KZUN", 35.0833333333, -108.791666667),
("LATI", 41.4147222222, 19.7205555556),
("LBBG", 42.5688888889, 27.5138888889),
("LBGO", 43.1513888889, 25.7127777778),
("LBPD", 42.0677777778, 24.8508333333),
("LBSF", 42.695, 23.4061111111),
("LBSZ", 42.3766666667, 25.655),
("LBWN", 43.2319444444, 27.825),
("LCLK", 34.875, 33.6247222222),
("LCPH", 34.7177777778, 32.4855555556),
("LCRA", 34.5902777778, 32.9877777778),
("LDDU", 42.5611111111, 18.2680555556),
("LDOC", 45.5419444444, 18.6361111111),
("LDOS", 45.4625, 18.8113888889),
("LDPL", 44.8933333333, 13.9219444444),
("LDRG", 45.3794444444, 14.5036111111),
("LDRI", 45.2166666667, 14.57),
("LDSP", 43.5388888889, 16.2977777778),
("LDVA", 46.2947222222, 16.3811111111),
("LDZA", 45.7427777778, 16.0686111111),
("LDZD", 44.1080555556, 15.3466666667),
("LDZU", 44.5575, 15.7741666667),
("LEAB", 38.9483333333, -1.86333333333),
("LEAL", 38.2819444444, -0.558055555556),
("LEAM", 36.8438888889, -2.37),
("LEAS", 43.5633333333, -6.03444444444),
("LEBA", 37.8419444444, -4.84861111111),
("LEBB", 43.3008333333, -2.91055555556),
("LEBG", 42.3575, -3.62055555556),
("LEBL", 41.2969444444, 2.07833333333),
("LEBZ", 38.8911111111, -6.82111111111),
("LECO", 43.3019444444, -8.37722222222),
("LEGA", 37.1330555556, -3.63555555556),
("LEGE", 41.9008333333, 2.76027777778),
("LEGR", 37.1886111111, -3.77722222222),
("LEGT", 40.2938888889, -3.72361111111),
("LEIB", 38.8727777778, 1.37305555556),
("LEJR", 36.7444444444, -6.06),
("LELC", 37.7747222222, -0.812222222222),
("LELN", 42.5888888889, -5.65555555556),
("LEMD", 40.4722222222, -3.56083333333),
("LEMG", 36.6736111111, -4.49888888889),
("LEMH", 39.8625, 4.21861111111),
("LEMM", 40.4166666667, -3.5),
("LEMO", 37.1747222222, -5.61583333333),
("LEOC", 39.9375, -3.50333333333),
("LEPA", 39.55, 2.73333333333),
("LEPP", 42.77, -1.64611111111),
("LERI", 37.9511111111, -1.23027777778),
("LERS", 41.1472222222, 1.16694444444),
("LERT", 36.645, -6.34944444444),
("LESA", 40.9519444444, -5.50194444444),
("LESB", 39.5988888889, 2.70277777778),
("LESJ", 39.5516666667, 2.73861111111),
("LESL", 39.8622222222, 4.25833333333),
("LESO", 43.3563888889, -1.79055555556),
("LEST", 42.8961111111, -8.415),
("LESU", 42.3386111111, 1.40916666667),
("LETO", 40.4866666667, -3.45805555556),
("LEVC", 39.4891666667, -0.481388888889),
("LEVD", 41.7061111111, -4.85194444444),
("LEVS", 40.3705555556, -3.785),
("LEVT", 42.8827777778, -2.72444444444),
("LEVX", 42.2316666667, -8.62666666667),
("LEXJ", 43.4269444444, -3.82),
("LEZG", 41.6661111111, -1.04138888889),
("LEZL", 37.4177777778, -5.89305555556),
("LFAC", 50.9619444444, 1.95472222222),
("LFAG", 49.8688888889, 3.02777777778),
("LFAI", 48.5936111111, 3.005),
("LFAO", 48.5458333333, -0.387222222222),
("LFAQ", 49.9713888889, 2.69972222222),
("LFAT", 50.5147222222, 1.62722222222),
("LFAV", 50.3255555556, 3.46111111111),
("LFAY", 49.8713888889, 2.38638888889),
("LFBA", 44.1747222222, 0.590555555556),
("LFBC", 44.5333333333, -1.125),
("LFBD", 44.8283333333, -0.715555555556),
("LFBE", 44.8252777778, 0.518611111111),
("LFBF", 43.5455555556, 1.3675),
("LFBG", 45.6583333333, -0.3175),
("LFBI", 46.5875, 0.306666666667),
("LFBK", 46.2244444444, 2.36305555556),
("LFBL", 45.8627777778, 1.17944444444),
("LFBM", 43.9116666667, -0.5075),
("LFBN", 46.3111111111, -0.401388888889),
("LFBO", 43.6288888889, 1.36361111111),
("LFBP", 43.38, -0.418611111111),
("LFBR", 43.4488888889, 1.26333333333),
("LFBT", 43.1786111111, -0.00638888888889),
("LFBU", 45.7291666667, 0.221388888889),
("LFBV", 45.1508333333, 1.46916666667),
("LFBX", 45.1980555556, 0.815555555556),
("LFBZ", 43.4683333333, -1.52305555556),
("LFCC", 44.3511111111, 1.47527777778),
("LFCG", 43.0075, 1.10305555556),
("LFCH", 44.5963888889, -1.11083333333),
("LFCI", 43.9136111111, 2.11305555556),
("LFCK", 43.5561111111, 2.28916666667),
("LFCL", 43.5861111111, 1.49916666667),
("LFCM", 43.99, 3.18305555556),
("LFCR", 44.4077777778, 2.4825),
("LFCU", 45.5347222222, 2.42388888889),
("LFCW", 44.3969444444, 0.758888888889),
("LFCY", 45.6280555556, -0.9725),
("LFCZ", 44.1461111111, -1.17444444444),
("LFDA", 43.7094444444, -0.245277777778),
("LFDB", 44.0255555556, 1.37777777778),
("LFDH", 43.6877777778, 0.601666666667),
("LFDI", 44.9822222222, -0.134722222222),
("LFDJ", 43.0905555556, 1.69583333333),
("LFDM", 44.4988888889, 0.200277777778),
("LFDN", 45.8877777778, -0.983055555556),
("LFEC", 48.4627777778, -5.06388888889),
("LFED", 48.0583333333, -2.92166666667),
("LFES", 48.0525, -3.66444444444),
("LFEY", 46.7186111111, -2.39111111111),
("LFFI", 47.4080555556, -1.1775),
("LFFN", 48.4297222222, 4.48111111111),
("LFGA", 48.1097222222, 7.35888888889),
("LFGF", 47.0058333333, 4.89333333333),
("LFGJ", 47.0388888889, 5.42722222222),
("LFGK", 47.9922222222, 3.39222222222),
("LFGW", 49.1222222222, 5.46888888889),
("LFHO", 44.5441666667, 4.37194444444),
("LFHP", 45.0794444444, 3.76472222222),
("LFHQ", 45.0763888889, 2.99361111111),
("LFHS", 46.2008333333, 5.29194444444),
("LFHV", 45.9163888889, 4.64055555556),
("LFHY", 46.5344444444, 3.42361111111),
("LFIG", 44.1775, 2.515),
("LFIO", 43.5688888889, 1.48083333333),
("LFJL", 48.9822222222, 6.25361111111),
("LFKB", 42.5538888889, 9.48333333333),
("LFKC", 42.5305555556, 8.79305555556),
("LFKF", 41.5005555556, 9.09777777778),
("LFKJ", 41.9236111111, 8.80277777778),
("LFKO", 41.6630555556, 8.89027777778),
("LFKS", 41.9241666667, 9.40583333333),
("LFKT", 42.2936111111, 9.19305555556),
("LFLA", 47.85, 3.49694444444),
("LFLB", 45.6377777778, 5.88),
("LFLC", 45.7863888889, 3.16916666667),
("LFLD", 47.0580555556, 2.37027777778),
("LFLE", 45.5608333333, 5.97555555556),
("LFLH", 46.8258333333, 4.8175),
("LFLI", 46.1919444444, 6.26833333333),
("LFLL", 45.7261111111, 5.09083333333),
("LFLM", 46.295, 4.79555555556),
("LFLN", 46.4125, 4.01305555556),
("LFLO", 46.0583333333, 4.00138888889),
("LFLP", 45.9291666667, 6.09861111111),
("LFLS", 45.3627777778, 5.32916666667),
("LFLT", 46.3525, 2.57027777778),
("LFLU", 44.9213888889, 4.96972222222),
("LFLV", 46.1694444444, 3.40361111111),
("LFLW", 44.8913888889, 2.42194444444),
("LFLX", 46.8619444444, 1.73055555556),
("LFLY", 45.7280555556, 4.94472222222),
("LFMA", 43.5052777778, 5.36777777778),
("LFMC", 43.3844444444, 6.38694444444),
("LFMD", 43.5419444444, 6.95333333333),
("LFMH", 45.5402777778, 4.29638888889),
("LFMI", 43.5225, 4.92361111111),
("LFMK", 43.2158333333, 2.30611111111),
("LFML", 43.4355555556, 5.21361111111),
("LFMN", 43.6605555556, 7.2175),
("LFMO", 44.1402777778, 4.86666666667),
("LFMP", 42.7402777778, 2.87055555556),
("LFMQ", 43.2525, 5.785),
("LFMS", 44.0694444444, 4.14194444444),
("LFMT", 43.5761111111, 3.96277777778),
("LFMU", 43.3238888889, 3.35555555556),
("LFMV", 43.9072222222, 4.90166666667),
("LFMY", 43.6063888889, 5.10916666667),
("LFMZ", 43.1758333333, 2.73416666667),
("LFNB", 44.5019444444, 3.53277777778),
("LFNH", 44.0297222222, 5.07805555556),
("LFOA", 47.0533333333, 2.6325),
("LFOB", 49.4544444444, 2.11277777778),
("LFOC", 48.0580555556, 1.37638888889),
("LFOD", 47.2566666667, -0.115),
("LFOE", 49.0286111111, 1.21972222222),
("LFOH", 49.5338888889, 0.0880555555556),
("LFOI", 50.1433333333, 1.83166666667),
("LFOJ", 47.9877777778, 1.76055555556),
("LFOK", 48.7758333333, 4.18444444444),
("LFOP", 49.3841666667, 1.17472222222),
("LFOT", 47.4322222222, 0.7275),
("LFOU", 47.0819444444, -0.876944444444),
("LFOV", 48.0311111111, -0.742777777778),
("LFOZ", 47.8969444444, 2.16333333333),
("LFPB", 48.9694444444, 2.44138888889),
("LFPC", 49.2533333333, 2.51888888889),
("LFPG", 49.0127777778, 2.55),
("LFPK", 48.8375, 3.01611111111),
("LFPM", 48.6047222222, 2.67111111111),
("LFPN", 48.7516666667, 2.10611111111),
("LFPO", 48.7252777778, 2.35944444444),
("LFPT", 49.0963888889, 2.04083333333),
("LFPV", 48.7741666667, 2.20138888889),
("LFPY", 48.5966666667, 2.33138888889),
("LFQA", 49.2077777778, 4.15666666667),
("LFQB", 48.3227777778, 4.01777777778),
("LFQC", 48.5972222222, 6.54472222222),
("LFQE", 49.2266666667, 5.67194444444),
("LFQF", 46.9663888889, 4.26027777778),
("LFQG", 47.0011111111, 3.11444444444),
("LFQI", 50.2216666667, 3.15416666667),
("LFQJ", 50.3102777778, 4.03305555556),
("LFQM", 47.2083333333, 6.08305555556),
("LFQP", 48.7661111111, 7.20027777778),
("LFQQ", 50.5616666667, 3.08944444444),
("LFQT", 50.6183333333, 2.64222222222),
("LFQV", 49.7838888889, 4.64694444444),
("LFQW", 47.6375, 6.20388888889),
("LFRA", 47.4972222222, -0.5725),
("LFRB", 48.4477777778, -4.41833333333),
("LFRC", 49.65, -1.47027777778),
("LFRD", 48.5877777778, -2.08),
("LFRE", 47.2894444444, -2.34638888889),
("LFRF", 48.8830555556, -1.56416666667),
("LFRG", 49.3652777778, 0.154166666667),
("LFRH", 47.7605555556, -3.44),
("LFRI", 46.7019444444, -1.37861111111),
("LFRJ", 48.53, -4.15138888889),
("LFRK", 49.1733333333, -0.45),
("LFRL", 48.2816666667, -4.445),
("LFRM", 47.9486111111, 0.201666666667),
("LFRN", 48.0694444444, -1.73472222222),
("LFRO", 48.7541666667, -3.47138888889),
("LFRQ", 47.9747222222, -4.16777777778),
("LFRS", 47.1530555556, -1.61055555556),
("LFRT", 48.5377777778, -2.85444444444),
("LFRU", 48.6030555556, -3.81555555556),
("LFRV", 47.7230555556, -2.71833333333),
("LFRZ", 47.3119444444, -2.14916666667),
("LFSB", 47.5894444444, 7.52972222222),
("LFSC", 47.9219444444, 7.39944444444),
("LFSD", 47.2688888889, 5.09),
("LFSF", 49.0716666667, 6.13166666667),
("LFSG", 48.3247222222, 6.06972222222),
("LFSH", 48.7933333333, 7.81611111111),
("LFSI", 48.6358333333, 4.89916666667),
("LFSL", 48.78, 5.97972222222),
("LFSM", 47.4869444444, 6.7925),
("LFSN", 48.6919444444, 6.23027777778),
("LFSO", 48.5830555556, 5.955),
("LFSP", 46.9044444444, 6.32694444444),
("LFSR", 49.31, 4.05),
("LFST", 48.5380555556, 7.62805555556),
("LFSX", 47.7830555556, 6.36388888889),
("LFTF", 43.2477777778, 6.12666666667),
("LFTH", 43.0972222222, 6.14583333333),
("LFTW", 43.7572222222, 4.41611111111),
("LFVM", 47.0952777778, -56.3802777778),
("LFVP", 46.7625, -56.1752777778),
("LFXA", 45.9872222222, 5.32833333333),
("LFXI", 44.0566666667, 5.49527777778),
("LFYD", 48.0847222222, 5.665),
("LFYG", 50.1413888889, 3.26305555556),
("LFYH", 47.335, 5.51361111111),
("LFYL", 47.7044444444, 6.54583333333),
("LFYT", 49.7583333333, 3.21194444444),
("LGAD", 37.9205555556, 21.2925),
("LGAG", 38.6019444444, 21.3511111111),
("LGAL", 40.8558333333, 25.9561111111),
("LGAT", 37.8877777778, 23.7316666667),
("LGAX", 40.6511111111, 22.4886111111),
("LGBL", 39.2194444444, 22.7941666667),
("LGEL", 38.0636111111, 23.5558333333),
("LGHI", 38.3430555556, 26.1405555556),
("LGIO", 39.6963888889, 20.8225),
("LGIR", 35.3394444444, 25.1802777778),
("LGKA", 40.4494444444, 21.2761111111),
("LGKC", 36.2741666667, 23.0169444444),
("LGKF", 38.1197222222, 20.5005555556),
("LGKL", 37.0683333333, 22.0255555556),
("LGKM", 40.9722222222, 24.3416666667),
("LGKO", 36.7933333333, 27.0916666667),
("LGKP", 35.4213888889, 27.1458333333),
("LGKR", 39.6019444444, 19.9116666667),
("LGKS", 35.4211111111, 26.91),
("LGKV", 40.9130555556, 24.6191666667),
("LGKZ", 40.2861111111, 21.8408333333),
("LGLE", 37.1847222222, 26.8002777778),
("LGLM", 39.9169444444, 25.2361111111),
("LGLR", 39.65, 22.4652777778),
("LGMG", 37.9811111111, 23.3652777778),
("LGMK", 37.435, 25.3480555556),
("LGMR", 38.145, 24.0141666667),
("LGMT", 39.0566666667, 26.5983333333),
("LGPZ", 38.9252777778, 20.7652777778),
("LGRD", 36.3830555556, 28.1088888889),
("LGRP", 36.4052777778, 28.0861111111),
("LGRX", 38.1511111111, 21.4255555556),
("LGSA", 35.5316666667, 24.1494444444),
("LGSK", 39.1769444444, 23.5036111111),
("LGSM", 37.69, 26.9116666667),
("LGSO", 37.4227777778, 24.9508333333),
("LGSP", 36.9733333333, 22.5261111111),
("LGSR", 36.4002777778, 25.4786111111),
("LGST", 35.2136111111, 26.0975),
("LGSV", 39.48, 22.7672222222),
("LGSY", 38.9675, 24.4872222222),
("LGTG", 38.3397222222, 23.5647222222),
("LGTL", 35.1869444444, 25.3266666667),
("LGTP", 37.5308333333, 22.405),
("LGTS", 40.5197222222, 22.9708333333),
("LGTT", 38.1088888889, 23.7836111111),
("LGZA", 37.75, 20.8819444444),
("LHBP", 47.4366666667, 19.2555555556),
("LHDC", 47.4888888889, 21.6152777778),
("LHGD", 47.5708333333, 19.3386111111),
("LHKE", 46.9175, 19.7491666667),
("LHKV", 46.3891666667, 17.7313888889),
("LHNY", 47.9838888889, 21.6922222222),
("LHOY", 46.3038888889, 18.7691666667),
("LHPA", 47.3638888889, 17.5008333333),
("LHSA", 47.0777777778, 17.9683333333),
("LHSK", 46.8580555556, 18.0955555556),
("LHSM", 46.6863888889, 17.1588888889),
("LHSN", 47.1227777778, 20.2352777778),
("LHTA", 46.3930555556, 17.9172222222),
("LHTL", 47.3452777778, 18.9808333333),
("LIBA", 41.5386111111, 15.7133333333),
("LIBC", 38.9972222222, 17.08),
("LIBD", 41.1383333333, 16.7605555556),
("LIBF", 41.4327777778, 15.535),
("LIBG", 40.5161111111, 17.4022222222),
("LIBN", 40.2386111111, 18.1330555556),
("LIBP", 42.4313888889, 14.1808333333),
("LIBR", 40.6575, 17.9469444444),
("LIBV", 40.7677777778, 16.9333333333),
("LICA", 38.9063888889, 16.2422222222),
("LICC", 37.4666666667, 15.0661111111),
("LICD", 35.4977777778, 12.6180555556),
("LICG", 36.8163888889, 11.9686111111),
("LICJ", 38.1758333333, 13.0908333333),
("LICP", 38.1108333333, 13.3133333333),
("LICR", 38.0711111111, 15.6513888889),
("LICT", 37.9125, 12.4880555556),
("LICZ", 37.4013888889, 14.9222222222),
("LIEA", 40.6319444444, 8.29055555556),
("LIED", 39.3541666667, 8.97222222222),
("LIEE", 39.2513888889, 9.05416666667),
("LIEO", 40.8986111111, 9.5175),
("LIET", 39.9186111111, 9.68277777778),
("LIIB", 40.4166666667, 12.3333333333),
("LIMA", 45.0861111111, 7.60305555556),
("LIMB", 45.5397222222, 9.20222222222),
("LIMC", 45.6313888889, 8.72777777778),
("LIME", 45.6738888889, 9.70416666667),
("LIMF", 45.2005555556, 7.64944444444),
("LIMG", 44.0505555556, 8.12722222222),
("LIMJ", 44.4119444444, 8.84166666667),
("LIML", 45.4452777778, 9.27694444444),
("LIMN", 45.5294444444, 8.66916666667),
("LIMP", 44.8244444444, 10.2961111111),
("LIMS", 44.9130555556, 9.72333333333),
("LIMW", 45.7383333333, 7.36777777778),
("LIMZ", 44.5463888889, 7.62222222222),
("LIPA", 46.0316666667, 12.5963888889),
("LIPB", 46.4605555556, 11.3261111111),
("LIPC", 44.2236111111, 12.3061111111),
("LIPE", 44.5341666667, 11.2902777778),
("LIPH", 45.6486111111, 12.1952777778),
("LIPI", 45.9805555556, 13.0544444444),
("LIPK", 44.1947222222, 12.07),
("LIPL", 45.4319444444, 10.2675),
("LIPN", 45.4719444444, 10.9277777778),
("LIPO", 45.4288888889, 10.3305555556),
("LIPQ", 45.8277777778, 13.4663888889),
("LIPR", 44.0202777778, 12.6119444444),
("LIPS", 45.6844444444, 12.0861111111),
("LIPT", 45.5741666667, 11.5305555556),
("LIPU", 45.3955555556, 11.8477777778),
("LIPX", 45.3955555556, 10.8883333333),
("LIPZ", 45.505, 12.3516666667),
("LIQS", 43.2572222222, 11.2541666667),
("LIRA", 41.7991666667, 12.5947222222),
("LIRE", 41.6536111111, 12.4444444444),
("LIRF", 41.8127777778, 12.2530555556),
("LIRG", 41.9902777778, 12.7408333333),
("LIRI", 40.6202777778, 14.9111111111),
("LIRJ", 42.7602777778, 10.2394444444),
("LIRL", 41.5422222222, 12.9088888889),
("LIRM", 41.0608333333, 14.0819444444),
("LIRN", 40.8858333333, 14.2905555556),
("LIRP", 43.6838888889, 10.3925),
("LIRQ", 43.8097222222, 11.205),
("LIRS", 42.7608333333, 11.0722222222),
("LIRU", 41.9519444444, 12.4988888889),
("LIRV", 42.4302777778, 12.0641666667),
("LIRZ", 43.0958333333, 12.5130555556),
("LJCE", 45.8997222222, 15.53),
("LJLJ", 46.2236111111, 14.4575),
("LJMB", 46.4797222222, 15.6861111111),
("LJPZ", 45.4733333333, 13.6147222222),
("LJSG", 46.4719444444, 15.1169444444),
("LKCS", 48.9461111111, 14.4272222222),
("LKCT", 49.6844444444, 15.6761111111),
("LKCV", 49.9394444444, 15.3816666667),
("LKHK", 50.2530555556, 15.845),
("LKKB", 50.1211111111, 14.5436111111),
("LKKU", 49.0294444444, 17.4397222222),
("LKKV", 50.2027777778, 12.9147222222),
("LKLN", 49.675, 13.2744444444),
("LKMH", 50.54, 15.0063888889),
("LKMT", 49.6963888889, 18.1111111111),
("LKNA", 49.1658333333, 16.1247222222),
("LKPD", 50.0133333333, 15.7386111111),
("LKPM", 49.7186111111, 14.0969444444),
("LKPO", 49.4258333333, 17.4047222222),
("LKPR", 50.1008333333, 14.26),
("LKSO", 49.2447222222, 14.7136111111),
("LKTB", 49.1511111111, 16.6941666667),
("LKVO", 50.2163888889, 14.3955555556),
("LLBG", 32.0094444444, 34.8766666667),
("LLBS", 31.2869444444, 34.7227777778),
("LLEK", 31.8394444444, 34.8216666667),
("LLES", 32.4408333333, 35.0061111111),
("LLET", 29.5611111111, 34.96),
("LLEY", 30.6230555556, 35.2019444444),
("LLHA", 32.8111111111, 35.0438888889),
("LLHS", 31.7625, 34.7272222222),
("LLIB", 32.9808333333, 35.5716666667),
("LLJR", 31.8666666667, 35.2166666667),
("LLLL", 31.2286111111, 35.1908333333),
("LLMG", 32.5986111111, 35.2283333333),
("LLMZ", 31.3280555556, 35.3883333333),
("LLNV", 31.2083333333, 35.0122222222),
("LLOV", 29.94, 34.9358333333),
("LLRD", 32.6602777778, 35.1822222222),
("LLRM", 30.7761111111, 34.6666666667),
("LLSD", 32.1144444444, 34.7819444444),
("LMML", 35.8572222222, 14.4775),
("LMMM", 35.9166666667, 14.4166666667),
("LOAN", 47.8433333333, 16.26),
("LOLW", 48.1830555556, 14.0408333333),
("LOWG", 47.0, 15.4333333333),
("LOWI", 47.26, 11.3438888889),
("LOWK", 46.65, 14.3333333333),
("LOWL", 48.2333333333, 14.1833333333),
("LOWM", 48.0, 16.5),
("LOWS", 47.7930555556, 13.0041666667),
("LOWW", 48.1102777778, 16.5697222222),
("LOXG", 46.9908333333, 15.4394444444),
("LOXK", 46.6425, 14.3375),
("LOXL", 48.2330555556, 14.1875),
("LOXT", 48.3208333333, 16.1116666667),
("LOXZ", 47.2027777778, 14.7441666667),
("LPAR", 38.8830555556, -9.03),
("LPAZ", 36.9713888889, -25.1705555556),
("LPBG", 41.8683333333, -6.71194444444),
("LPBJ", 38.0788888889, -7.93222222222),
("LPBR", 41.5869444444, -8.445),
("LPCO", 40.1572222222, -8.47),
("LPCS", 38.725, -9.355),
("LPCV", 40.2647222222, -7.47972222222),
("LPEV", 38.5333333333, -7.88944444444),
("LPFL", 39.455, -31.1311111111),
("LPFR", 37.0141666667, -7.96583333333),
("LPGR", 39.0919444444, -28.0297222222),
("LPHR", 38.5197222222, -28.7158333333),
("LPIN", 40.9741666667, -8.64527777778),
("LPLA", 38.7641666667, -27.0933333333),
("LPMG", 38.7166666667, -9.15583333333),
("LPMR", 39.8311111111, -8.88722222222),
("LPMT", 38.7036111111, -9.03583333333),
("LPOV", 40.9158333333, -8.64583333333),
("LPPD", 37.7411111111, -25.6977777778),
("LPPI", 38.5544444444, -28.4397222222),
("LPPM", 37.1491666667, -8.58388888889),
("LPPR", 41.2480555556, -8.68138888889),
("LPPS", 33.0733333333, -16.3497222222),
("LPPT", 38.7811111111, -9.13583333333),
("LPSJ", 38.6652777778, -28.1755555556),
("LPST", 38.8308333333, -9.33944444444),
("LPTN", 39.475, -8.36444444444),
("LPVR", 41.2741666667, -7.72027777778),
("LPVZ", 40.7252777778, -7.88888888889),
("LQBK", 44.9413888889, 17.2975),
("LQMO", 43.2827777778, 17.8458333333),
("LQSA", 43.8244444444, 18.3313888889),
("LRAR", 46.1763888889, 21.2619444444),
("LRBC", 46.5219444444, 26.9102777778),
("LRBM", 47.6583333333, 23.47),
("LRBS", 44.5030555556, 26.1019444444),
("LRCK", 44.3622222222, 28.4883333333),
("LRCL", 46.785, 23.6861111111),
("LRCS", 45.42, 22.2533333333),
("LRCV", 44.3180555556, 23.8886111111),
("LRIA", 47.1788888889, 27.62),
("LROD", 47.0252777778, 21.9025),
("LROP", 44.5736111111, 26.1033333333),
("LRSB", 45.7855555556, 24.0911111111),
("LRSM", 47.7033333333, 22.8855555556),
("LRSV", 47.6875, 26.3538888889),
("LRTC", 45.0622222222, 28.7141666667),
("LRTM", 46.4675, 24.4125),
("LRTR", 45.81, 21.3377777778),
("LSAZ", 46.6136111111, 7.67777777778),
("LSGC", 47.0836111111, 6.79277777778),
("LSGG", 46.2380555556, 6.10888888889),
("LSGK", 46.4875, 7.25083333333),
("LSGS", 46.2194444444, 7.32666666667),
("LSMA", 46.9438888889, 8.28416666667),
("LSMC", 46.5011111111, 8.29555555556),
("LSMD", 47.3986111111, 8.64805555556),
("LSME", 47.0922222222, 8.305),
("LSMF", 47.0786111111, 9.06472222222),
("LSMI", 46.6763888889, 7.87916666667),
("LSMJ", 46.3038888889, 7.71444444444),
("LSMM", 46.7433333333, 8.11),
("LSMN", 46.3036111111, 7.82333333333),
("LSMP", 46.8430555556, 6.915),
("LSMU", 46.9747222222, 8.39888888889),
("LSSW", 47.3833333333, 8.56666666667),
("LSZA", 46.0041666667, 8.91055555556),
("LSZB", 46.9138888889, 7.49694444444),
("LSZG", 47.1813888889, 7.41694444444),
("LSZH", 47.4647222222, 8.54916666667),
("LSZR", 47.485, 9.56055555556),
("LSZS", 46.5325, 9.88277777778),
("LTAA", 40.0, 32.0),
("LTAB", 39.9347222222, 32.7405555556),
("LTAC", 40.1280555556, 32.995),
("LTAD", 39.9497222222, 32.6886111111),
("LTAE", 40.0788888889, 32.5655555556),
("LTAF", 36.9819444444, 35.2802777778),
("LTAG", 37.0019444444, 35.4258333333),
("LTAH", 38.7261111111, 30.6011111111),
("LTAI", 36.9013888889, 30.7916666667),
("LTAJ", 36.9480555556, 37.4791666667),
("LTAK", 36.5733333333, 36.1538888889),
("LTAL", 41.3138888889, 33.7958333333),
("LTAN", 37.9788888889, 32.5616666667),
("LTAP", 40.8291666667, 35.5219444444),
("LTAQ", 41.2763888889, 36.3036111111),
("LTAR", 39.8136111111, 36.9033333333),
("LTAS", 41.5158333333, 32.0997222222),
("LTAT", 38.4358333333, 38.0916666667),
("LTAU", 38.7702777778, 35.4952777778),
("LTAV", 39.4513888889, 31.3652777778),
("LTAW", 40.305, 36.3677777778),
("LTAX", 41.2544444444, 31.415),
("LTAY", 37.785, 29.7011111111),
("LTAZ", 38.7716666667, 34.5341666667),
("LTBA", 40.9766666667, 28.8211111111),
("LTBD", 37.8158333333, 27.8861111111),
("LTBE", 40.2316666667, 29.0091666667),
("LTBF", 39.6188888889, 27.9247222222),
("LTBG", 40.3177777778, 27.9775),
("LTBH", 40.1375, 26.4266666667),
("LTBI", 39.7838888889, 30.5819444444),
("LTBJ", 38.2922222222, 27.1569444444),
("LTBK", 38.3191666667, 27.1597222222),
("LTBL", 38.5130555556, 27.01),
("LTBM", 37.7852777778, 30.5816666667),
("LTBN", 39.4266666667, 30.0163888889),
("LTBO", 38.6811111111, 29.4713888889),
("LTBP", 40.6833333333, 29.3786111111),
("LTBQ", 40.735, 30.0830555556),
("LTBR", 40.255, 29.5625),
("LTBS", 36.7130555556, 28.7925),
("LTBT", 38.8086111111, 27.8336111111),
("LTBU", 41.1380555556, 27.9188888889),
("LTBX", 40.9927777778, 29.2163888889),
("LTBY", 39.8097222222, 30.5194444444),
("LTCA", 38.6066666667, 39.2913888889),
("LTCC", 37.8936111111, 40.2005555556),
("LTCD", 39.71, 39.5261111111),
("LTCE", 39.9563888889, 41.17),
("LTCF", 40.5622222222, 43.115),
("LTCG", 40.995, 39.7897222222),
("LTCH", 37.0919444444, 38.8461111111),
("LTCI", 38.4680555556, 43.3322222222),
("LTCJ", 37.9288888889, 41.1161111111),
("LTCK", 38.7544444444, 41.6611111111),
("LTCL", 37.9786111111, 41.8402777778),
("LTCM", 42.0188888889, 35.0791666667),
("LTFA", 38.5175, 26.9772222222),
("LTFB", 37.9505555556, 27.3288888889),
("LUBL", 47.8377777778, 27.7811111111),
("LUKK", 46.9277777778, 28.9313888889),
("LWOH", 41.1797222222, 20.7422222222),
("LWSK", 41.9613888889, 21.6213888889),
("LXGB", 36.1508333333, -5.34944444444),
("LYBE", 44.8183333333, 20.3088888889),
("LYPG", 42.3591666667, 19.2516666667),
("LYPR", 42.5727777778, 21.0358333333),
("LYTV", 42.4044444444, 18.7230555556),
("LYVR", 45.1466666667, 21.3097222222),
("LZIB", 48.17, 17.2125),
("LZKZ", 48.6630555556, 21.2411111111),
("LZMC", 48.4019444444, 17.1183333333),
("LZPP", 48.625, 17.8283333333),
("LZSL", 48.6377777778, 19.1338888889),
("LZTN", 48.865, 17.9922222222),
("LZTT", 49.0733333333, 20.2408333333),
("LZZI", 49.2316666667, 18.6136111111),
("MBNC", 21.9172222222, -71.9394444444),
("MBPV", 21.7736111111, -72.2658333333),
("MBSC", 21.5155555556, -71.5283333333),
("MDAB", 19.1986111111, -69.43),
("MDBH", 18.2513888889, -71.1202777778),
("MDCR", 17.9288888889, -71.6447222222),
("MDCZ", 18.9080555556, -70.72),
("MDHE", 18.4711111111, -69.9688888889),
("MDLR", 18.4519444444, -68.9116666667),
("MDPC", 18.5672222222, -68.3633333333),
("MDPP", 19.7572222222, -70.5697222222),
("MDSD", 18.4294444444, -69.6686111111),
("MDSI", 18.5036111111, -69.7616666667),
("MDST", 19.4091666667, -70.6163888889),
("MGBN", 15.4733333333, -88.8372222222),
("MGCB", 15.4688888889, -90.4066666667),
("MGGT", 14.5830555556, -90.5275),
("MGPB", 15.7308333333, -88.5836111111),
("MGPP", 16.3261111111, -89.4169444444),
("MGQZ", 14.8652777778, -91.5019444444),
("MGRT", 14.5208333333, -91.6972222222),
("MGSJ", 13.9361111111, -90.8358333333),
("MHIC", 17.4072222222, -83.9325),
("MHLC", 15.7422222222, -86.8533333333),
("MHLM", 15.4525, -87.9233333333),
("MHNJ", 16.4452777778, -85.9063888889),
("MHPL", 15.2608333333, -83.7813888889),
("MHRO", 16.3166666667, -86.5225),
("MHTE", 15.7758333333, -87.4755555556),
("MHTG", 14.0608333333, -87.2169444444),
("MHTJ", 15.9266666667, -85.9380555556),
("MKBS", 18.4041666667, -76.9688888889),
("MKJP", 17.9355555556, -76.7875),
("MKJS", 18.5036111111, -77.9133333333),
("MKKJ", 18.1986111111, -76.5344444444),
("MKTP", 17.9883333333, -76.8236111111),
("MMAA", 16.7566666667, -99.7533333333),
("MMAN", 25.8655555556, -100.237222222),
("MMAS", 21.7052777778, -102.317777778),
("MMBT", 15.7747222222, -96.2608333333),
("MMCB", 18.835, -99.2619444444),
("MMCC", 29.3319444444, -100.980833333),
("MMCE", 18.6536111111, -91.7988888889),
("MMCG", 30.3972222222, -107.874722222),
("MMCH", 17.5736111111, -99.5141666667),
("MMCL", 24.7644444444, -107.474444444),
("MMCM", 18.5044444444, -88.3266666667),
("MMCN", 27.3925, -109.833055556),
("MMCP", 19.8166666667, -90.5002777778),
("MMCS", 31.6361111111, -106.428611111),
("MMCU", 28.7027777778, -105.964444444),
("MMCV", 23.7038888889, -98.9563888889),
("MMCY", 20.5458333333, -100.886388889),
("MMCZ", 20.5222222222, -86.9255555556),
("MMDM", 22.7402777778, -99.0180555556),
("MMDO", 24.1241666667, -104.527777778),
("MMEP", 21.4194444444, -104.8425),
("MMES", 31.7952777778, -116.6025),
("MMGL", 20.5216666667, -103.311111111),
("MMGM", 27.9688888889, -110.925),
("MMHC", 18.4969444444, -97.4197222222),
("MMHO", 29.0958333333, -111.047777778),
("MMIA", 19.2769444444, -103.577222222),
("MMIM", 21.245, -86.7397222222),
("MMIO", 25.5494444444, -100.928611111),
("MMIT", 16.4491666667, -95.0936111111),
("MMJA", 19.475, -96.7975),
("MMLC", 18.0016666667, -102.220277778),
("MMLM", 25.685, -109.080555556),
("MMLO", 20.9933333333, -101.480833333),
("MMLP", 24.0725, -110.362222222),
("MMLT", 25.9891666667, -111.348333333),
("MMMA", 25.7697222222, -97.5252777778),
("MMMD", 20.9369444444, -89.6575),
("MMML", 32.6305555556, -115.241388889),
("MMMM", 19.8497222222, -101.025277778),
("MMMT", 18.1033333333, -94.5805555556),
("MMMV", 26.9555555556, -101.47),
("MMMX", 19.4361111111, -99.0719444444),
("MMMY", 25.7783333333, -100.106666667),
("MMMZ", 23.1611111111, -106.265833333),
("MMNG", 31.2258333333, -110.975555556),
("MMNL", 27.4438888889, -99.5702777778),
("MMOX", 16.9997222222, -96.7263888889),
("MMPA", 20.6025, -97.4608333333),
("MMPB", 19.1580555556, -98.3713888889),
("MMPC", 20.0772222222, -98.7822222222),
("MMPE", 31.3516666667, -113.525555556),
("MMPG", 28.6272222222, -100.535),
("MMPN", 19.3966666667, -102.039166667),
("MMPR", 20.68, -105.254166667),
("MMPS", 15.8766666667, -97.0888888889),
("MMQT", 20.6238888889, -100.368611111),
("MMRX", 26.0088888889, -98.2283333333),
("MMSD", 23.1516666667, -109.720833333),
("MMSF", 30.93, -114.808611111),
("MMSP", 22.2541666667, -100.930555556),
("MMTA", 19.5363888889, -98.1733333333),
("MMTC", 25.5680555556, -103.410555556),
("MMTG", 16.7694444444, -93.3413888889),
("MMTJ", 32.5408333333, -116.97),
("MMTM", 22.2963888889, -97.8658333333),
("MMTN", 22.0380555556, -98.8063888889),
("MMTO", 19.3369444444, -99.5658333333),
("MMTP", 14.7941666667, -92.37),
("MMTX", 19.5983333333, -103.371944444),
("MMUN", 21.0363888889, -86.8769444444),
("MMVA", 17.9969444444, -92.8172222222),
("MMVR", 19.1452777778, -96.1869444444),
("MMZC", 22.8969444444, -102.686666667),
("MMZH", 17.6013888889, -101.460277778),
("MMZM", 20.045, -102.275833333),
("MMZO", 19.1447222222, -104.558611111),
("MMZP", 20.7558333333, -103.465277778),
("MNBL", 11.9888888889, -83.7741666667),
("MNBR", 12.1894444444, -86.3538888889),
("MNLN", 12.4277777778, -86.9022222222),
("MNMG", 12.1411111111, -86.1680555556),
("MNPC", 14.0469444444, -83.3866666667),
("MPBO", 9.34083333333, -82.2508333333),
("MPCH", 9.45861111111, -82.5166666667),
("MPDA", 8.39083333333, -82.4347222222),
("MPHO", 8.91444444444, -79.5994444444),
("MPJE", 7.51722222222, -78.1566666667),
("MPLP", 8.40666666667, -78.1416666667),
("MPMG", 8.97333333333, -79.5555555556),
("MPSA", 8.08555555556, -80.945),
("MPTO", 9.07111111111, -79.3833333333),
("MRBA", 9.16694444444, -83.3325),
("MRBC", 10.7686111111, -83.5858333333),
("MRCC", 8.60111111111, -82.97),
("MRCV", 10.3555555556, -85.8527777778),
("MREC", 10.2019444444, -83.4719444444),
("MRFI", 8.91611111111, -83.5072222222),
("MRFS", 8.6525, -83.0652777778),
("MRGF", 8.65388888889, -83.1819444444),
("MRGP", 10.2172222222, -83.7947222222),
("MRLB", 10.5930555556, -85.5441666667),
("MRLC", 11.0352777778, -84.7061111111),
("MRLM", 9.95777777778, -83.0219444444),
("MRNS", 9.97638888889, -85.6527777778),
("MROC", 9.99361111111, -84.2086111111),
("MRPD", 9.73194444444, -82.9830555556),
("MRPM", 8.95083333333, -83.4683333333),
("MRPV", 9.95722222222, -84.1419444444),
("MRQP", 9.44305555556, -84.1297222222),
("MRSG", 10.2883333333, -83.7136111111),
("MRSV", 8.82611111111, -82.9588888889),
("MSLP", 13.4405555556, -89.0558333333),
("MSSS", 13.7, -89.12),
("MTCH", 19.7325, -72.1947222222),
("MTPP", 18.58, -72.2925),
("MUBA", 20.3655555556, -74.5063888889),
("MUBY", 20.3963888889, -76.6213888889),
("MUCA", 22.0269444444, -78.7894444444),
("MUCB", 22.5063888889, -79.4697222222),
("MUCC", 22.4611111111, -78.3286111111),
("MUCF", 22.15, -80.4141666667),
("MUCL", 21.6161111111, -81.5455555556),
("MUCM", 21.4202777778, -77.8475),
("MUCU", 19.97, -75.8355555556),
("MUFL", 21.4997222222, -78.2027777778),
("MUGM", 19.9063888889, -75.2069444444),
("MUGT", 20.0852777778, -75.1583333333),
("MUHA", 22.9891666667, -82.4091666667),
("MUHG", 20.7855555556, -76.315),
("MULM", 22.3358333333, -83.6419444444),
("MUMG", 22.9697222222, -82.2747222222),
("MUML", 23.0072222222, -82.7675),
("MUMO", 20.6541666667, -74.9216666667),
("MUMZ", 20.2880555556, -77.0891666667),
("MUNC", 20.6886111111, -75.5313888889),
("MUNG", 21.8347222222, -82.7838888889),
("MUPB", 23.0327777778, -82.5794444444),
("MUPR", 22.4211111111, -83.6775),
("MUSA", 22.8713888889, -82.5091666667),
("MUSC", 22.4919444444, -79.9436111111),
("MUSJ", 22.0952777778, -84.1519444444),
("MUSL", 21.5094444444, -77.0175),
("MUSN", 21.6425, -82.955),
("MUSS", 21.9705555556, -79.4422222222),
("MUTD", 21.7883333333, -79.9972222222),
("MUVR", 23.0344444444, -81.4352777778),
("MUVT", 20.9877777778, -76.9358333333),
("MWCB", 19.6866666667, -79.8827777778),
("MWCR", 19.2927777778, -81.3575),
("MYAB", 24.2875, -77.6844444444),
("MYAF", 24.6977777778, -77.7955555556),
("MYAK", 24.1586111111, -77.5897222222),
("MYAM", 26.5113888889, -77.0833333333),
("MYAN", 25.0536111111, -78.0488888889),
("MYAP", 22.4416666667, -73.9708333333),
("MYAS", 26.0044444444, -77.3952777778),
("MYAT", 26.7452777778, -77.3911111111),
("MYBC", 25.4169444444, -77.8808333333),
("MYBG", 25.7380555556, -77.84),
("MYBS", 25.6997222222, -79.2644444444),
("MYCA", 24.6291666667, -75.6736111111),
("MYCB", 24.315, -75.4538888889),
("MYCI", 22.7455555556, -74.1822222222),
("MYEF", 23.5625, -75.8777777778),
("MYEH", 25.4755555556, -76.6811111111),
("MYEM", 25.2844444444, -76.3308333333),
("MYEN", 24.5944444444, -76.8319444444),
("MYER", 24.8916666667, -76.1775),
("MYES", 24.1688888889, -76.4388888889),
("MYGF", 26.5586111111, -78.6952777778),
("MYGW", 26.6861111111, -78.9775),
("MYIG", 20.975, -73.6666666667),
("MYLD", 23.1788888889, -75.0933333333),
("MYLS", 23.5827777778, -75.2686111111),
("MYMM", 22.3794444444, -73.0133333333),
("MYNN", 25.0388888889, -77.4661111111),
("MYRD", 22.1816666667, -75.7294444444),
("MYSM", 24.0630555556, -74.5238888889),
("MZBZ", 17.5388888889, -88.3080555556),
("NCAI", -18.825, -159.773611111),
("NCRG", -21.2025, -159.805555556),
("NFFN", -17.7544444444, 177.443333333),
("NFNA", -18.0430555556, 178.559166667),
("NFNL", -16.4666666667, 179.339722222),
("NFTF", -21.2408333333, -175.15),
("NFTL", -19.7769444444, -174.341111111),
("NFTV", -18.5852777778, -173.962777778),
("NGFU", -8.51666666667, 179.216666667),
("NGTA", 1.38138888889, 173.146944444),
("NGTE", -1.22361111111, 174.776111111),
("NIUE", -19.08, -169.925555556),
("NLWW", -13.2380555556, -176.199166667),
("NSFA", -13.8297222222, -172.008333333),
("NSTU", -14.3308333333, -170.710277778),
("NTAR", -22.4338888889, -151.360555556),
("NTAT", -23.3652777778, -149.523888889),
("NTGA", -17.3525, -145.509722222),
("NTGB", -15.8197222222, -140.886944444),
("NTGC", -15.1194444444, -148.230555556),
("NTGE", -18.4658333333, -136.439444444),
("NTGF", -16.0544444444, -145.656944444),
("NTGI", -14.4366666667, -146.07),
("NTGJ", -23.0797222222, -134.890277778),
("NTGK", -15.6633333333, -146.884722222),
("NTGM", -16.5847222222, -143.657222222),
("NTGN", -14.1766666667, -141.267222222),
("NTGP", -14.8094444444, -138.812777778),
("NTGT", -14.7125, -145.252777778),
("NTGU", -15.2480555556, -146.616388889),
("NTGV", -14.8680555556, -148.717222222),
("NTGY", -20.7833333333, -138.567777778),
("NTKR", -14.4555555556, -145.024444444),
("NTMD", -8.79555555556, -140.228611111),
("NTMN", -9.76861111111, -139.011111111),
("NTTB", -16.4441666667, -151.751111111),
("NTTG", -14.9541666667, -147.660555556),
("NTTH", -16.6872222222, -151.021666667),
("NTTM", -17.4897222222, -149.761666667),
("NTTO", -18.0747222222, -140.945833333),
("NTTP", -16.4263888889, -152.243611111),
("NTTR", -16.7227777778, -151.465833333),
("NTTX", -21.8083333333, -138.794166667),
("NVSS", -15.5011111111, 167.2225),
("NVVV", -17.6991666667, 168.319722222),
("NWWD", -21.0533333333, 164.837777778),
("NWWK", -20.5461111111, 164.255555556),
("NWWL", -20.7747222222, 167.239722222),
("NWWM", -22.2580555556, 166.472777778),
("NWWR", -21.4816666667, 168.0375),
("NWWU", -20.7911111111, 165.259166667),
("NWWV", -20.6405555556, 166.572777778),
("NWWW", -22.0144444444, 166.212777778),
("NZAA", -37.0080555556, 174.791666667),
("NZAP", -38.7397222222, 176.084444444),
("NZAR", -37.0297222222, 174.973333333),
("NZCH", -43.4891666667, 172.532222222),
("NZCI", -43.81, -176.457222222),
("NZDN", -45.9280555556, 170.198333333),
("NZGS", -38.6633333333, 177.978333333),
("NZGT", -43.9066666667, 170.128333333),
("NZHK", -42.7136111111, 170.985277778),
("NZHN", -37.8663888889, 175.331944444),
("NZHS", -39.6466666667, 176.766944444),
("NZKK", -35.2627777778, 173.911944444),
("NZKT", -35.07, 173.285277778),
("NZLX", -45.2116666667, 169.373333333),
("NZMC", -43.765, 170.133333333),
("NZMO", -45.5330555556, 167.65),
("NZMS", -40.9733333333, 175.633611111),
("NZNP", -39.0086111111, 174.179166667),
("NZNS", -41.2983333333, 173.221111111),
("NZNV", -46.4122222222, 168.312777778),
("NZOH", -40.2058333333, 175.387777778),
("NZOU", -44.97, 171.081666667),
("NZPM", -40.3205555556, 175.616944444),
("NZPP", -40.9047222222, 174.989166667),
("NZQN", -45.0211111111, 168.739166667),
("NZRO", -38.1091666667, 176.317222222),
("NZRU", -39.4463888889, 175.658333333),
("NZSP", -89.9997222222, 0.0),
("NZTG", -37.6719444444, 176.196111111),
("NZTU", -44.3027777778, 171.225277778),
("NZUK", -44.235, 170.118333333),
("NZWB", -41.5183333333, 173.870277778),
("NZWD", -77.8833333333, 166.65),
("NZWF", -44.7252777778, 169.243055556),
("NZWG", -43.5511111111, 172.552777778),
("NZWK", -37.9205555556, 176.914166667),
("NZWN", -41.3272222222, 174.805277778),
("NZWO", -39.0069444444, 177.406666667),
("NZWP", -36.7877777778, 174.630277778),
("NZWR", -35.7683333333, 174.365),
("NZWS", -41.7380555556, 171.580833333),
("NZWU", -39.9622222222, 175.025277778),
("OAHR", 34.2097222222, 62.2277777778),
("OAJL", 34.3991666667, 70.4994444444),
("OAKB", 34.5658333333, 69.2122222222),
("OAKN", 31.5058333333, 65.8477777778),
("OAMN", 35.9341666667, 64.7591666667),
("OAMS", 36.7069444444, 67.2091666667),
("OASD", 33.3911111111, 62.2608333333),
("OASG", 36.7502777778, 65.9122222222),
("OATQ", 36.775, 69.5325),
("OAUZ", 36.665, 68.9108333333),
("OBBI", 26.2708333333, 50.6336111111),
("OBBS", 25.9183333333, 50.5905555556),
("OEAB", 18.24, 42.6555555556),
("OEAH", 25.2841666667, 49.4861111111),
("OEBA", 20.2961111111, 41.6341666667),
("OEBH", 19.9838888889, 42.6227777778),
("OEBQ", 25.9113888889, 49.5913888889),
("OEDF", 26.4711111111, 49.7977777778),
("OEDR", 26.2652777778, 50.1519444444),
("OEGN", 16.9011111111, 42.5858333333),
("OEGS", 26.3027777778, 43.7744444444),
("OEGT", 31.4108333333, 37.2788888889),
("OEHL", 27.4377777778, 41.6861111111),
("OEJB", 27.0388888889, 49.405),
("OEJF", 21.3480555556, 39.1727777778),
("OEJN", 21.6794444444, 39.1563888889),
("OEKK", 27.9008333333, 45.5280555556),
("OEMA", 24.5533333333, 39.705),
("OENG", 17.6113888889, 44.4191666667),
("OEPA", 28.335, 46.125),
("OEPC", 25.1744444444, 47.4883333333),
("OEPF", 24.7102777778, 44.9644444444),
("OEPJ", 24.1072222222, 41.0358333333),
("OERB", 22.7025, 39.0697222222),
("OERF", 29.6263888889, 43.4905555556),
("OERK", 24.9575, 46.6986111111),
("OERM", 28.0794444444, 48.6108333333),
("OERR", 30.9072222222, 41.1383333333),
("OERT", 26.7230555556, 50.0305555556),
("OESH", 17.4666666667, 47.1211111111),
("OESK", 29.785, 40.1),
("OESL", 20.4647222222, 45.6194444444),
("OETB", 28.3652777778, 36.6188888889),
("OETF", 21.4833333333, 40.5441666667),
("OETH", 25.2141666667, 46.6405555556),
("OETN", 27.8688888889, 48.7683333333),
("OETR", 31.6925, 38.7311111111),
("OEWD", 20.5041666667, 45.1994444444),
("OEWJ", 26.1975, 36.4761111111),
("OEYN", 24.1441666667, 38.0633333333),
("OIAA", 30.365, 48.2330555556),
("OIAD", 32.4344444444, 48.3975),
("OIAG", 30.7452777778, 49.6761111111),
("OIAH", 30.3375, 50.8277777778),
("OIAI", 32.0022222222, 49.2705555556),
("OIAJ", 30.835, 49.5347222222),
("OIAM", 30.5561111111, 49.1516666667),
("OIAW", 31.3372222222, 48.7619444444),
("OIBA", 25.8758333333, 55.0327777778),
("OIBB", 28.9447222222, 50.8344444444),
("OIBH", 27.2125, 54.3183333333),
("OIBI", 27.4838888889, 52.6183333333),
("OIBK", 26.5266666667, 53.9816666667),
("OIBL", 26.5316666667, 54.8216666667),
("OIBQ", 29.2591666667, 50.3238888889),
("OIBS", 25.9094444444, 54.5391666667),
("OIBV", 26.81, 53.3563888889),
("OICC", 34.3463888889, 47.1563888889),
("OICD", 32.9344444444, 47.4833333333),
("OICI", 33.5855555556, 46.4052777778),
("OICK", 33.4363888889, 48.2858333333),
("OICS", 35.2469444444, 47.0069444444),
("OIFE", 32.9294444444, 51.5608333333),
("OIFH", 32.5669444444, 51.6913888889),
("OIFM", 32.7505555556, 51.8616666667),
("OIFP", 32.6208333333, 51.6966666667),
("OIGG", 37.3252777778, 49.6055555556),
("OIHH", 34.8680555556, 48.5522222222),
("OIHR", 34.1402777778, 49.8483333333),
("OIID", 35.7027777778, 51.475),
("OIIG", 35.6447222222, 51.3805555556),
("OIII", 35.6891666667, 51.3133333333),
("OIIK", 36.2413888889, 50.0475),
("OIIS", 35.5908333333, 53.495),
("OIKB", 27.2180555556, 56.3777777778),
("OIKJ", 28.7266666667, 57.67),
("OIKK", 30.2611111111, 56.9566666667),
("OIKM", 29.0838888889, 58.4502777778),
("OIKP", 27.1580555556, 56.1722222222),
("OIKQ", 26.755, 55.9019444444),
("OIKR", 30.2977777778, 56.0519444444),
("OIKY", 29.5508333333, 55.6725),
("OIMB", 32.8955555556, 59.2755555556),
("OIMC", 36.5011111111, 61.0647222222),
("OIMJ", 36.4236111111, 55.1058333333),
("OIMN", 37.4930555556, 57.3005555556),
("OIMT", 33.6677777778, 56.8925),
("OIMX", 37.6277777778, 56.1730555556),
("OINE", 37.3830555556, 55.4519444444),
("OING", 36.9091666667, 54.4016666667),
("OINM", 34.1691666667, 51.3175),
("OINN", 36.6633333333, 51.4647222222),
("OINR", 36.9097222222, 50.6794444444),
("OINZ", 36.6436111111, 53.1883333333),
("OISD", 28.7213888889, 54.4411111111),
("OISF", 28.8919444444, 53.7227777778),
("OISJ", 28.5863888889, 53.5788888889),
("OISL", 27.6736111111, 54.3813888889),
("OISO", 29.7541666667, 52.6941666667),
("OISR", 27.3708333333, 53.1891666667),
("OISS", 29.5391666667, 52.5894444444),
("OITL", 38.3261111111, 48.4241666667),
("OITM", 37.3486111111, 46.1261111111),
("OITP", 39.6036111111, 47.8811111111),
("OITT", 38.1327777778, 46.2347222222),
("OITZ", 36.7741666667, 48.3597222222),
("OIYY", 31.9047222222, 54.2763888889),
("OIZB", 31.0969444444, 61.5438888889),
("OIZC", 25.4433333333, 60.3819444444),
("OIZH", 29.4761111111, 60.9058333333),
("OIZI", 27.2363888889, 60.72),
("OIZJ", 25.6533333333, 57.7991666667),
("OJAI", 31.7225, 35.9930555556),
("OJAM", 31.9725, 35.9913888889),
("OJAQ", 29.6113888889, 35.0180555556),
("OJJR", 31.8647222222, 35.2191666667),
("OJMF", 32.3561111111, 36.2591666667),
("OKBK", 29.2266666667, 47.98),
("OLBA", 33.8133333333, 35.4886111111),
("OLKA", 34.5891666667, 36.0111111111),
("OMAA", 24.4327777778, 54.6511111111),
("OMAD", 24.4283333333, 54.4580555556),
("OMAH", 24.0736111111, 52.4633333333),
("OMAJ", 24.1872222222, 52.6138888889),
("OMAL", 24.2616666667, 55.6091666667),
("OMAM", 24.2480555556, 54.5475),
("OMAR", 24.7802777778, 52.5597222222),
("OMAS", 25.1461111111, 52.8736111111),
("OMAZ", 24.8613888889, 53.0777777778),
("OMDB", 25.2547222222, 55.3641666667),
("OMFJ", 25.1122222222, 56.3238888889),
("OMRK", 25.6133333333, 55.9386111111),
("OMSJ", 25.3283333333, 55.5169444444),
("OOKB", 26.1711111111, 56.2405555556),
("OOMA", 20.6752777778, 58.8902777778),
("OOMS", 23.5930555556, 58.2844444444),
("OOSA", 17.0386111111, 54.0911111111),
("OOSQ", 23.0666666667, 57.65),
("OOTH", 17.6658333333, 54.0244444444),
("OPBN", 32.9719444444, 70.5247222222),
("OPBW", 29.3469444444, 71.7113888889),
("OPCH", 35.8863888889, 71.8005555556),
("OPDB", 28.8747222222, 64.4044444444),
("OPDG", 29.9611111111, 70.4855555556),
("OPDI", 31.9091666667, 70.8963888889),
("OPFA", 31.365, 72.9952777778),
("OPGD", 25.2330555556, 62.3294444444),
("OPGT", 35.9186111111, 74.3336111111),
("OPJA", 28.2841666667, 68.4494444444),
("OPJI", 25.0677777778, 61.8052777778),
("OPKC", 24.9063888889, 67.1605555556),
("OPKD", 25.3180555556, 68.3661111111),
("OPKH", 27.7925, 66.6427777778),
("OPKN", 28.5944444444, 65.4247222222),
("OPLA", 31.5213888889, 74.4033333333),
("OPLH", 31.4947222222, 74.3461111111),
("OPMA", 33.05, 73.6383333333),
("OPMF", 34.3383333333, 73.5083333333),
("OPMI", 32.5630555556, 71.5705555556),
("OPMJ", 27.335, 68.1427777778),
("OPMK", 25.6825, 69.0727777778),
("OPMN", 33.0125, 70.0641666667),
("OPMR", 24.8933333333, 66.9386111111),
("OPMT", 30.2030555556, 71.4188888889),
("OPNH", 26.2191666667, 68.39),
("OPNK", 29.5377777778, 66.0222222222),
("OPOK", 30.7408333333, 73.3575),
("OPOR", 25.2730555556, 64.5883333333),
("OPPC", 33.9025, 70.0713888889),
("OPPG", 26.9544444444, 64.1325),
("OPPI", 25.2836111111, 63.3327777778),
("OPPS", 33.9938888889, 71.5144444444),
("OPQS", 33.5611111111, 73.0319444444),
("OPQT", 30.2511111111, 66.9375),
("OPRK", 28.3852777778, 70.2797222222),
("OPRN", 33.6163888889, 73.0991666667),
("OPRQ", 30.7580555556, 72.2825),
("OPRS", 34.0811111111, 71.9725),
("OPRT", 33.8491666667, 73.7977777778),
("OPSD", 35.3347222222, 75.5363888889),
("OPSK", 27.7219444444, 68.7916666667),
("OPSR", 32.0486111111, 72.665),
("OPSS", 34.8130555556, 72.3519444444),
("OPSU", 28.645, 69.1766666667),
("OPSW", 31.8894444444, 72.3091666667),
("OPTA", 33.9861111111, 72.6113888889),
("OPTH", 24.8413888889, 68.8383333333),
("OPTU", 25.9861111111, 63.03),
("OPWN", 32.3052777778, 69.5694444444),
("OPZB", 31.3583333333, 69.4633333333),
("ORBS", 33.2619444444, 44.2338888889),
("ORMM", 30.5486111111, 47.6622222222),
("OSAP", 36.1805555556, 37.2241666667),
("OSDI", 33.4113888889, 36.5155555556),
("OSDZ", 35.2852777778, 40.1758333333),
("OSKL", 37.0236111111, 41.1944444444),
("OSLK", 35.4008333333, 35.9486111111),
("OSPR", 34.5572222222, 38.3166666667),
("OTBD", 25.2611111111, 51.565),
("PAAQ", 61.5947222222, -149.088611111),
("PABA", 70.1338888889, -143.576944444),
("PABE", 60.7797222222, -161.837777778),
("PABI", 63.9944444444, -145.721388889),
("PABM", 59.3616666667, -155.257222222),
("PABR", 71.2852777778, -156.765833333),
("PABT", 66.9152777778, -151.528055556),
("PACD", 55.2055555556, -162.724166667),
("PACL", 64.3011111111, -149.12),
("PACV", 60.4916666667, -145.4775),
("PACZ", 61.7802777778, -166.038611111),
("PADK", 51.8777777778, -176.645833333),
("PADL", 59.0452777778, -158.503333333),
("PADQ", 57.75, -152.493611111),
("PADU", 53.9, -166.543333333),
("PAED", 61.2511111111, -149.806388889),
("PAEH", 58.6472222222, -162.060555556),
("PAEI", 64.6655555556, -147.101388889),
("PAEN", 60.5730555556, -151.245),
("PAFA", 64.815, -147.856111111),
("PAFB", 64.8375, -147.614444444),
("PAFR", 61.2661111111, -149.653055556),
("PAGA", 64.7361111111, -156.937222222),
("PAGK", 62.1547222222, -145.456388889),
("PAGY", 59.46, -135.315555556),
("PAHO", 59.6455555556, -151.476388889),
("PAIL", 59.7536111111, -154.910833333),
("PAIM", 65.9927777778, -153.704166667),
("PAJN", 58.3547222222, -134.576111111),
("PAKN", 58.6766666667, -156.649166667),
("PAKT", 55.3555555556, -131.713611111),
("PALU", 68.875, -166.11),
("PAMC", 62.9527777778, -155.605555556),
("PAMD", 59.4497222222, -146.308611111),
("PAMR", 61.2141666667, -149.846111111),
("PANC", 61.1741666667, -149.996111111),
("PANT", 55.0422222222, -131.572222222),
("PAOM", 64.5119444444, -165.445),
("PAOR", 62.9611111111, -141.928888889),
("PAOT", 66.8844444444, -162.598333333),
("PAPB", 56.5783333333, -169.661388889),
("PAPM", 59.0111111111, -161.819444444),
("PASC", 70.1947222222, -148.465),
("PASI", 57.0469444444, -135.361388889),
("PASN", 57.1672222222, -170.220277778),
("PASV", 61.0972222222, -155.574166667),
("PASY", 52.7122222222, 174.113611111),
("PATA", 65.1741666667, -152.109166667),
("PATC", 65.5630555556, -167.922222222),
("PATK", 62.3202777778, -150.093611111),
("PATL", 62.8941666667, -155.976388889),
("PAUN", 63.8883333333, -160.798888889),
("PAVD", 61.1338888889, -146.248333333),
("PAWT", 70.6133333333, -159.860277778),
("PAYA", 59.5030555556, -139.66),
("PCIS", -2.76666666667, -0.0),
("PFYU", 66.5713888889, -145.250277778),
("PGRO", 14.1744444444, 145.243333333),
("PGSN", 15.1194444444, 145.729166667),
("PGTW", 13.5, 144.833333333),
("PGUA", 13.5838888889, 144.93),
("PGUM", 13.4838888889, 144.796944444),
("PGWT", 14.9977777778, 145.619166667),
("PHBK", 22.0216666667, -159.786666667),
("PHDH", 21.5791666667, -158.210277778),
("PHHI", 21.4833333333, -158.039444444),
("PHHN", 20.7955555556, -156.014166667),
("PHJH", 20.9627777778, -156.674166667),
("PHKO", 19.7386111111, -156.045555556),
("PHLI", 21.9761111111, -159.338611111),
("PHMK", 21.1527777778, -157.096111111),
("PHMU", 20.0011111111, -155.668055556),
("PHNG", 21.4491666667, -157.767777778),
("PHNL", 21.3158333333, -157.926666667),
("PHNY", 20.7855555556, -156.951388889),
("PHOG", 20.8986111111, -156.430555556),
("PHSF", 19.76, -155.553611111),
("PHTO", 19.7202777778, -155.048611111),
("PHUP", 20.265, -155.859722222),
("PJON", 16.7286111111, -169.534166667),
("PKMA", 11.3408333333, 162.327777778),
("PKMJ", 7.06472222222, 171.271944444),
("PKRO", 9.39666666667, 167.470833333),
("PKWA", 8.72, 167.731388889),
("PLCH", 1.98611111111, -157.349722222),
("PMDY", 28.2013888889, -177.381388889),
("POLI", 70.4994444444, -149.879444444),
("PPIZ", 69.7327777778, -163.005277778),
("PTKK", 7.46166666667, 151.842777778),
("PTPN", 6.985, 158.208888889),
("PTRO", 7.3675, 134.543888889),
("PTSA", 5.35666666667, 162.958333333),
("PTYA", 9.49861111111, 138.082222222),
("RCBS", 24.4319444444, 118.359444444),
("RCDC", 22.6722222222, 120.461666667),
("RCDI", 24.855, 121.2375),
("RCFN", 22.7566666667, 121.093333333),
("RCGI", 22.6744444444, 121.458333333),
("RCGM", 25.0555555556, 121.2425),
("RCKH", 22.5752777778, 120.350833333),
("RCKU", 23.4616666667, 120.392777778),
("RCLG", 24.1861111111, 120.653611111),
("RCLY", 22.0294444444, 121.527222222),
("RCMQ", 24.2644444444, 120.620555556),
("RCMT", 26.2238888889, 120.0025),
("RCNN", 22.9502777778, 120.205555556),
("RCPO", 24.8177777778, 120.939166667),
("RCQC", 23.5686111111, 119.628055556),
("RCQS", 22.7930555556, 121.181944444),
("RCRA", 22.7047222222, 120.280555556),
("RCSQ", 22.7, 120.482222222),
("RCSS", 25.0694444444, 121.551666667),
("RCTP", 25.08, 121.232222222),
("RCWA", 23.3708333333, 119.494444444),
("RCYU", 24.0230555556, 121.617777778),
("RJAA", 35.7647222222, 140.386388889),
("RJAF", 36.1666666667, 137.922777778),
("RJAH", 36.1808333333, 140.415277778),
("RJAM", 24.2894444444, 153.978888889),
("RJAW", 24.7838888889, 141.3225),
("RJBD", 33.6622222222, 135.364444444),
("RJBK", 34.5908333333, 133.933055556),
("RJCB", 42.7333333333, 143.217222222),
("RJCC", 42.775, 141.692222222),
("RJCH", 41.77, 140.821944444),
("RJCJ", 42.7944444444, 141.666388889),
("RJCM", 43.8805555556, 144.163888889),
("RJCN", 43.5772222222, 144.959722222),
("RJCO", 43.1161111111, 141.38),
("RJCT", 42.8902777778, 143.158333333),
("RJCW", 45.4038888889, 141.800833333),
("RJDB", 33.7488888889, 129.785277778),
("RJDC", 33.93, 131.278888889),
("RJDT", 34.2847222222, 129.330277778),
("RJEB", 44.3038888889, 143.403888889),
("RJEC", 43.6708333333, 142.4475),
("RJER", 45.2419444444, 141.186388889),
("RJFA", 33.8830555556, 130.652777778),
("RJFC", 30.3855555556, 130.658888889),
("RJFE", 32.6661111111, 128.832777778),
("RJFF", 33.5863888889, 130.45),
("RJFG", 30.5466666667, 130.95),
("RJFK", 31.8033333333, 130.719166667),
("RJFM", 31.8769444444, 131.448333333),
("RJFN", 32.0836111111, 131.451666667),
("RJFO", 33.4794444444, 131.737222222),
("RJFR", 33.8361111111, 130.946944444),
("RJFT", 32.8372222222, 130.855),
("RJFU", 32.9225, 129.923333333),
("RJFY", 31.3675, 130.845277778),
("RJFZ", 33.6852777778, 131.040555556),
("RJKA", 28.4305555556, 129.7125),
("RJKB", 27.4252777778, 128.700833333),
("RJKN", 27.8361111111, 128.881111111),
("RJNF", 36.1427777778, 136.223888889),
("RJNG", 35.3941666667, 136.869444444),
("RJNH", 34.75, 137.703055556),
("RJNK", 36.3936111111, 136.407777778),
("RJNN", 35.255, 136.924444444),
("RJNO", 36.1811111111, 133.324722222),
("RJNT", 36.6483333333, 137.187222222),
("RJNY", 34.8125, 138.297777778),
("RJOA", 34.4352777778, 132.921944444),
("RJOB", 34.7569444444, 133.855555556),
("RJOC", 35.4136111111, 132.89),
("RJOF", 34.0344444444, 131.549166667),
("RJOH", 35.4922222222, 133.236388889),
("RJOI", 34.1436111111, 132.235555556),
("RJOK", 33.5444444444, 133.671388889),
("RJOM", 33.8272222222, 132.699722222),
("RJOO", 34.7852777778, 135.438055556),
("RJOR", 35.53, 134.166388889),
("RJOS", 34.1327777778, 134.606388889),
("RJOT", 34.2138888889, 134.015555556),
("RJOY", 34.5961111111, 135.602777778),
("RJOZ", 34.0452777778, 131.051944444),
("RJSA", 40.7344444444, 140.690833333),
("RJSC", 38.4116666667, 140.371111111),
("RJSH", 40.5563888889, 141.466111111),
("RJSI", 39.4308333333, 141.135833333),
("RJSK", 39.6155555556, 140.218611111),
("RJSM", 40.7030555556, 141.368333333),
("RJSS", 38.1394444444, 140.916666667),
("RJST", 38.4047222222, 141.219444444),
("RJSY", 38.8116666667, 139.786944444),
("RJTA", 35.4544444444, 139.45),
("RJTE", 34.9869444444, 139.829166667),
("RJTF", 35.6716666667, 139.528055556),
("RJTH", 33.115, 139.785555556),
("RJTJ", 35.8413888889, 139.409722222),
("RJTK", 35.3980555556, 139.909722222),
("RJTL", 35.7988888889, 140.011111111),
("RJTO", 34.7844444444, 139.361388889),
("RJTQ", 34.0719444444, 139.559722222),
("RJTR", 35.5136111111, 139.393611111),
("RJTT", 35.5522222222, 139.779444444),
("RJTY", 35.7483333333, 139.348333333),
("RKJJ", 35.1255555556, 126.809722222),
("RKJK", 35.9036111111, 126.615833333),
("RKJM", 34.7588888889, 126.379722222),
("RKJU", 35.8783333333, 127.119444444),
("RKJY", 34.8397222222, 127.615277778),
("RKNC", 37.8836111111, 127.717777778),
("RKND", 38.1475, 128.600555556),
("RKNN", 37.7533333333, 128.943888889),
("RKNW", 37.4380555556, 127.960277778),
("RKNY", 38.0611111111, 128.668888889),
("RKPC", 33.5111111111, 126.492777778),
("RKPE", 35.1411111111, 128.695555556),
("RKPK", 35.1794444444, 128.938055556),
("RKPP", 35.1708333333, 129.128611111),
("RKPS", 35.0883333333, 128.070277778),
("RKPU", 35.5933333333, 129.351666667),
("RKSG", 36.9605555556, 127.033333333),
("RKSM", 37.4458333333, 127.113888889),
("RKSO", 37.0905555556, 127.029444444),
("RKSS", 37.5580555556, 126.790555556),
("RKSW", 37.2391666667, 127.006944444),
("RKTH", 35.9877777778, 129.420277778),
("RKTJ", 35.8563888889, 129.211388889),
("RKTN", 35.8938888889, 128.658611111),
("RKTU", 36.7163888889, 127.498888889),
("RKTY", 36.6316666667, 128.354722222),
("ROAH", 26.1955555556, 127.645833333),
("RODE", 26.7286111111, 127.761666667),
("RODN", 26.3555555556, 127.7675),
("ROIG", 24.3444444444, 124.186944444),
("ROKJ", 26.3633333333, 126.713611111),
("ROMD", 25.8463888889, 131.263333333),
("ROMY", 24.7827777778, 125.295),
("RORK", 25.9477777778, 131.321388889),
("RORS", 24.8266666667, 125.144722222),
("RORY", 27.0438888889, 128.401388889),
("ROTM", 26.2741666667, 127.756388889),
("ROYN", 24.4669444444, 122.977777778),
("RPLL", 14.5086111111, 121.019444444),
("RPMB", 6.10555555556, 125.236111111),
("RPMC", 7.16472222222, 124.210277778),
("RPML", 8.41444444444, 124.611388889),
("RPMM", 7.61722222222, 124.058611111),
("RPMP", 7.82777777778, 123.460277778),
("RPMR", 6.05805555556, 125.096111111),
("RPMS", 9.75777777778, 125.480833333),
("RPMZ", 6.92222222222, 122.059444444),
("RPUB", 16.375, 120.618888889),
("RPUD", 14.1291666667, 122.980277778),
("RPUF", 14.9863888889, 120.4925),
("RPUG", 16.0347222222, 120.240833333),
("RPUH", 12.3613888889, 121.046388889),
("RPUI", 15.3255555556, 119.968888889),
("RPUL", 13.955, 121.124722222),
("RPUM", 13.2080555556, 120.605277778),
("RPUN", 13.5852777778, 123.270833333),
("RPUO", 20.4516666667, 121.98),
("RPUP", 14.2927777778, 122.645555556),
("RPUQ", 17.5536111111, 120.3575),
("RPUS", 16.5955555556, 120.303055556),
("RPUT", 17.6380555556, 121.730555556),
("RPUV", 13.5775, 124.206111111),
("RPUW", 13.3611111111, 121.825277778),
("RPUY", 16.9297222222, 121.753333333),
("RPUZ", 16.6188888889, 121.252222222),
("RPVA", 11.2272222222, 125.027777778),
("RPVB", 10.6425, 122.929444444),
("RPVC", 12.0725, 124.545),
("RPVD", 9.33416666667, 123.301944444),
("RPVF", 12.5022222222, 124.635555556),
("RPVG", 11.0355555556, 125.742777778),
("RPVI", 10.7130555556, 122.545),
("RPVK", 11.6811111111, 122.377777778),
("RPVM", 10.3075, 123.979166667),
("RPVO", 11.0558333333, 124.565555556),
("RPVP", 9.74194444444, 118.758611111),
("RPVR", 11.5975, 122.752777778),
("RPVS", 10.7661111111, 121.932222222),
("SAAC", -31.2969444444, -57.9963888889),
("SAAG", -33.0058333333, -58.6130555556),
("SAAI", -35.3477777778, -57.2938888889),
("SAAJ", -34.5458333333, -60.9305555556),
("SAAP", -31.7947222222, -60.4802777778),
("SAAR", -32.9033333333, -60.7844444444),
("SAAV", -31.7116666667, -60.8116666667),
("SABE", -34.5591666667, -58.4155555556),
("SACC", -31.0066666667, -64.5325),
("SACO", -31.3236111111, -64.2077777778),
("SACT", -30.3452777778, -66.2936111111),
("SADD", -34.5005555556, -58.6041666667),
("SADF", -34.4530555556, -58.5894444444),
("SADJ", -34.5605555556, -58.7894444444),
("SADL", -34.9722222222, -57.8944444444),
("SADM", -34.6761111111, -58.6425),
("SADP", -34.6097222222, -58.6125),
("SADS", -34.7313888889, -58.5994444444),
("SAHC", -37.4444444444, -70.2222222222),
("SAHR", -39.0005555556, -67.6202777778),
("SAME", -32.8316666667, -68.7927777778),
("SAMM", -35.4838888889, -69.5825),
("SAMQ", -32.8658333333, -68.8722222222),
("SAMR", -34.5880555556, -68.4025),
("SANC", -28.5955555556, -65.7516666667),
("SANE", -27.7655555556, -64.31),
("SANI", -28.0375, -67.5802777778),
("SANL", -29.3813888889, -66.7958333333),
("SANO", -29.2238888889, -67.4388888889),
("SANT", -26.8408333333, -65.1047222222),
("SANU", -31.5713888889, -68.4180555556),
("SAOC", -33.0855555556, -64.2613888889),
("SAOD", -31.9411111111, -65.1422222222),
("SAOL", -34.1352777778, -63.3622222222),
("SAOM", -32.6836111111, -62.1577777778),
("SAOR", -33.7297222222, -65.3872222222),
("SAOU", -33.2730555556, -66.3563888889),
("SARC", -27.4452777778, -58.7616666667),
("SARE", -27.4497222222, -59.0561111111),
("SARF", -26.2125, -58.2280555556),
("SARI", -25.7375, -54.4730555556),
("SARL", -29.6891666667, -57.1519444444),
("SARM", -30.2716666667, -57.64),
("SARP", -27.3858333333, -55.9705555556),
("SARS", -26.7563888889, -60.4930555556),
("SASA", -24.8558333333, -65.4861111111),
("SASJ", -24.3927777778, -65.0977777778),
("SASO", -23.1527777778, -64.3291666667),
("SASQ", -22.1622222222, -65.5697222222),
("SAST", -22.6197222222, -63.7936111111),
("SATG", -29.1058333333, -59.2186111111),
("SATK", -24.7211111111, -60.5486111111),
("SATM", -29.2230555556, -58.0880555556),
("SATO", -27.5180555556, -55.1238888889),
("SATR", -29.21, -59.6908333333),
("SATU", -29.7705555556, -57.9788888889),
("SAVB", -41.9430555556, -71.5322222222),
("SAVC", -45.785, -67.4655555556),
("SAVD", -42.0305555556, -71.1697222222),
("SAVE", -42.9077777778, -71.1394444444),
("SAVH", -46.5383333333, -68.9658333333),
("SAVT", -43.2102777778, -65.2702777778),
("SAVV", -40.8691666667, -63.0002777778),
("SAVY", -42.7588888889, -65.1025),
("SAWA", -50.3352777778, -72.2483333333),
("SAWB", -64.2383333333, -56.6308333333),
("SAWD", -47.7352777778, -65.9038888889),
("SAWE", -53.7775, -67.7491666667),
("SAWG", -51.6086111111, -69.3125),
("SAWH", -54.8430555556, -68.2955555556),
("SAWJ", -49.3066666667, -67.8025),
("SAWS", -44.0480555556, -70.4591666667),
("SAWT", -51.6063888889, -72.2166666667),
("SAWU", -50.0163888889, -68.5791666667),
("SAZB", -38.7247222222, -62.1691666667),
("SAZC", -37.4461111111, -61.8891666667),
("SAZD", -36.3202777778, -57.7216666667),
("SAZF", -36.8908333333, -60.2161111111),
("SAZG", -35.6961111111, -63.7580555556),
("SAZH", -38.3866666667, -60.3294444444),
("SAZI", -36.1869444444, -61.0761111111),
("SAZL", -36.5422222222, -56.7216666667),
("SAZM", -37.9341666667, -57.5733333333),
("SAZN", -38.9488888889, -68.1555555556),
("SAZO", -38.4894444444, -58.8158333333),
("SAZP", -35.8455555556, -61.8577777778),
("SAZR", -36.5880555556, -64.2752777778),
("SAZS", -41.1511111111, -71.1575),
("SAZT", -37.2372222222, -59.2277777778),
("SAZV", -37.2352777778, -57.0291666667),
("SAZW", -38.9394444444, -69.2644444444),
("SAZY", -40.0752777778, -71.1372222222),
("SBAA", -8.34833333333, -49.3013888889),
("SBAF", -22.875, -43.3844444444),
("SBAM", 2.07666666667, -50.8622222222),
("SBAQ", -21.8119444444, -48.1327777778),
("SBAR", -10.9838888889, -37.0702777778),
("SBAS", -22.6383333333, -50.4558333333),
("SBAT", -9.86583333333, -56.1061111111),
("SBAU", -21.1411111111, -50.4247222222),
("SBAV", -22.5255555556, -52.9719444444),
("SBBE", -1.37916666667, -48.4761111111),
("SBBG", -31.3902777778, -54.1122222222),
("SBBH", -19.8516666667, -43.9502777778),
("SBBI", -25.405, -49.2319444444),
("SBBQ", -21.2669444444, -43.7608333333),
("SBBR", -15.8625, -47.9125),
("SBBU", -22.345, -49.0536111111),
("SBBV", 2.84611111111, -60.69),
("SBBW", -15.8611111111, -52.3888888889),
("SBCA", -25.0002777778, -53.5005555556),
("SBCC", -9.33388888889, -54.9652777778),
("SBCF", -19.6336111111, -43.9686111111),
("SBCG", -20.4686111111, -54.6725),
("SBCH", -27.1341666667, -52.6563888889),
("SBCI", -7.32027777778, -47.4586111111),
("SBCM", -28.7255555556, -49.4247222222),
("SBCO", -29.9458333333, -51.1444444444),
("SBCP", -21.6983333333, -41.3016666667),
("SBCR", -19.0116666667, -57.6727777778),
("SBCT", -25.5283333333, -49.1755555556),
("SBCV", -17.6522222222, -39.2530555556),
("SBCX", -29.1969444444, -51.1875),
("SBCY", -15.6527777778, -56.1166666667),
("SBCZ", -7.6, -72.7694444444),
("SBDN", -22.175, -51.4244444444),
("SBEG", -3.03861111111, -60.0497222222),
("SBEK", -6.23305555556, -57.7766666667),
("SBES", -22.8127777778, -42.0925),
("SBFC", -20.5919444444, -47.3827777778),
("SBFI", -25.5961111111, -54.4869444444),
("SBFL", -27.6725, -48.5477777778),
("SBFN", -3.85472222222, -32.4233333333),
("SBFT", -20.2783333333, -49.1872222222),
("SBFU", -20.7027777778, -46.335),
("SBFZ", -3.77611111111, -38.5325),
("SBGL", -22.8088888889, -43.2436111111),
("SBGM", -10.7861111111, -65.2847222222),
("SBGO", -16.6311111111, -49.2222222222),
("SBGR", -23.4322222222, -46.4691666667),
("SBGS", -25.1844444444, -50.1438888889),
("SBGW", -22.7913888889, -45.2047222222),
("SBHT", -3.25388888889, -52.2538888889),
("SBIC", -3.12722222222, -58.4811111111),
("SBIH", -4.24222222222, -56.0005555556),
("SBIL", -14.8158333333, -39.0330555556),
("SBIP", -19.4705555556, -42.4875),
("SBIT", -18.4444444444, -49.2133333333),
("SBIZ", -5.53111111111, -47.46),
("SBJC", -1.41388888889, -48.4605555556),
("SBJF", -21.7913888889, -43.3866666667),
("SBJP", -7.26972222222, -35.8961111111),
("SBJV", -26.2247222222, -48.7972222222),
("SBKP", -23.0080555556, -47.1344444444),
("SBLJ", -27.7819444444, -50.2813888889),
("SBLN", -21.6638888889, -49.7302777778),
("SBLO", -23.3336111111, -51.13),
("SBLP", -13.2619444444, -43.4080555556),
("SBLS", -19.6613888889, -43.8963888889),
("SBMA", -5.36833333333, -49.1377777778),
("SBMD", -0.889722222222, -52.6022222222),
("SBMG", -23.4397222222, -51.9069444444),
("SBMK", -16.7066666667, -43.8188888889),
("SBML", -22.1966666667, -49.9263888889),
("SBMN", -3.14555555556, -59.9861111111),
("SBMO", -9.51027777778, -35.7933333333),
("SBMQ", 0.0505555555556, -51.0719444444),
("SBMS", -5.20166666667, -37.3641666667),
("SBMT", -23.5088888889, -46.6375),
("SBMY", -5.81138888889, -61.2786111111),
("SBNF", -26.88, -48.6513888889),
("SBNM", -28.2816666667, -54.1688888889),
("SBNT", -5.91111111111, -35.2477777778),
("SBOI", 3.85527777778, -51.7966666667),
("SBPA", -29.9941666667, -51.1713888889),
("SBPB", -2.89361111111, -41.7319444444),
("SBPC", -21.8427777778, -46.5677777778),
("SBPF", -28.2438888889, -52.3263888889),
("SBPK", -31.7183333333, -52.3275),
("SBPL", -9.36388888889, -40.5638888889),
("SBPN", -10.7191666667, -48.3997222222),
("SBPP", -22.5494444444, -55.7025),
("SBPV", -8.70916666667, -63.9022222222),
("SBQV", -14.8625, -40.8630555556),
("SBRB", -9.86888888889, -67.8936111111),
("SBRF", -8.12638888889, -34.9233333333),
("SBRG", -32.0825, -52.1663888889),
("SBRJ", -22.9102777778, -43.1630555556),
("SBRP", -21.1341666667, -47.7741666667),
("SBSC", -22.9322222222, -43.7188888889),
("SBSJ", -23.2291666667, -45.8613888889),
("SBSL", -2.58861111111, -44.2363888889),
("SBSM", -29.7111111111, -53.6880555556),
("SBSP", -23.6266666667, -46.6552777778),
("SBSR", -20.8163888889, -49.4063888889),
("SBST", -23.925, -46.2875),
("SBSV", -12.9108333333, -38.3308333333),
("SBTB", -1.48944444444, -56.3966666667),
("SBTE", -5.05972222222, -42.8233333333),
("SBTF", -3.38277777778, -64.7238888889),
("SBTK", -8.155, -70.7830555556),
("SBTL", -24.3175, -50.6513888889),
("SBTS", 2.22333333333, -55.9458333333),
("SBTT", -4.25555555556, -69.9355555556),
("SBTU", -3.785, -49.7194444444),
("SBUA", -0.148333333333, -66.9855555556),
("SBUF", -9.40083333333, -38.2505555556),
("SBUG", -29.7819444444, -57.0380555556),
("SBUL", -18.8827777778, -48.2255555556),
("SBUP", -20.7769444444, -51.5647222222),
("SBUR", -19.765, -47.9647222222),
("SBVG", -21.59, -45.4733333333),
("SBVH", -12.6941666667, -60.0980555556),
("SBVT", -20.2555555556, -40.2888888889),
("SBYA", 0.6075, -69.1858333333),
("SBYS", -21.9852777778, -47.3380555556),
("SCAC", -41.9061111111, -73.7966666667),
("SCAP", -43.6152777778, -71.8069444444),
("SCAR", -18.3483333333, -70.3386111111),
("SCBA", -45.9158333333, -71.6891666667),
("SCBQ", -33.5616666667, -70.6883333333),
("SCCC", -46.5816666667, -71.6975),
("SCCF", -22.4986111111, -68.9041666667),
("SCCH", -36.5827777778, -72.0316666667),
("SCCI", -53.0027777778, -70.8547222222),
("SCCY", -45.5941666667, -72.1061111111),
("SCDA", -20.535, -70.1811111111),
("SCEL", -33.3927777778, -70.7855555556),
("SCFA", -23.4444444444, -70.445),
("SCFM", -53.2536111111, -70.3191666667),
("SCGE", -37.4016666667, -72.4255555556),
("SCGZ", -54.9308333333, -67.6261111111),
("SCHA", -27.2961111111, -70.4136111111),
("SCIC", -34.9669444444, -71.2169444444),
("SCIE", -36.7725, -73.0630555556),
("SCIP", -27.1647222222, -109.421666667),
("SCJO", -40.6113888889, -73.0602777778),
("SCLL", -28.5975, -70.7591666667),
("SCRG", -34.1736111111, -70.7755555556),
("SCRM", -62.1908333333, -58.9866666667),
("SCSE", -29.9163888889, -71.1911111111),
("SCSN", -33.6566666667, -71.6155555556),
("SCST", -42.4908333333, -73.7744444444),
("SCTB", -33.4563888889, -70.5466666667),
("SCTC", -38.7686111111, -72.6358333333),
("SCTE", -41.4386111111, -73.0938888889),
("SCTI", -33.4933333333, -70.6975),
("SCTN", -42.9330555556, -72.6991666667),
("SCVD", -39.6497222222, -73.0861111111),
("SEAM", -1.2125, -78.5741666667),
("SECM", -1.70611111111, -79.3788888889),
("SECO", -0.462777777778, -76.9863888889),
("SECU", -2.88944444444, -78.9841666667),
("SEGS", -0.453611111111, -90.2658333333),
("SEGU", -2.15777777778, -79.8838888889),
("SEGZ", -3.42333333333, -78.5730555556),
("SEIB", 0.338333333333, -78.1363888889),
("SELA", 0.0922222222222, -76.8691666667),
("SELJ", -1.70416666667, -79.5522222222),
("SELM", -1.50138888889, -79.4808333333),
("SELT", -0.906666666667, -78.6155555556),
("SEMA", -4.38027777778, -79.9405555556),
("SEMC", -2.29916666667, -78.1205555556),
("SEMH", -3.26888888889, -79.9616666667),
("SEMO", -2.06694444444, -76.9752777778),
("SEMT", -0.945555555556, -80.6780555556),
("SEMX", -2.85083333333, -79.8036111111),
("SEMY", -1.73972222222, -79.6216666667),
("SEPD", 0.0730555555556, -80.0522222222),
("SEPV", -1.04138888889, -80.4719444444),
("SEQU", -0.141111111111, -78.4880555556),
("SERB", -1.65361111111, -78.6561111111),
("SERO", -3.43527777778, -79.9777777778),
("SESA", -2.20472222222, -80.9886111111),
("SESD", -0.248055555556, -79.2144444444),
("SEST", -0.909722222222, -89.6158333333),
("SETA", -2.26083333333, -79.6891666667),
("SETE", -0.986666666667, -77.8194444444),
("SETR", -0.122777777778, -76.3375),
("SETU", 0.809444444444, -77.7080555556),
("SFAL", -51.6855555556, -57.7775),
("SGAS", -25.2397222222, -57.5188888889),
("SGAY", -27.3705555556, -56.8538888889),
("SGCO", -23.4416666667, -57.4269444444),
("SGFI", -22.3597222222, -60.0536111111),
("SGIB", -25.4075, -54.6194444444),
("SGME", -22.0447222222, -60.6216666667),
("SGPI", -26.8813888889, -58.3177777778),
("SKAR", 4.45361111111, -75.7652777778),
("SKAS", 0.505, -76.5008333333),
("SKBC", 9.04527777778, -73.9747222222),
("SKBG", 7.12638888889, -73.1847222222),
("SKBO", 4.70138888889, -74.1469444444),
("SKBQ", 10.8894444444, -74.7805555556),
("SKBS", 6.20277777778, -77.3944444444),
("SKBU", 3.81944444444, -76.9897222222),
("SKCC", 7.9275, -72.5113888889),
("SKCD", 5.07166666667, -76.6763888889),
("SKCG", 10.4422222222, -75.5127777778),
("SKCL", 3.54305555556, -76.3813888889),
("SKCO", 1.81416666667, -78.7491666667),
("SKCZ", 9.3325, -75.2855555556),
("SKEJ", 7.02416666667, -73.8066666667),
("SKFL", 1.59027777778, -75.5638888889),
("SKGI", 4.27611111111, -74.7966666667),
("SKGO", 4.75805555556, -75.9555555556),
("SKGP", 2.57, -77.8983333333),
("SKGY", 4.81222222222, -74.0647222222),
("SKIB", 4.42138888889, -75.1330555556),
("SKIP", 0.861666666667, -77.6716666667),
("SKLC", 7.81194444444, -76.7163888889),
("SKLM", 11.2325, -72.49),
("SKLT", -4.19305555556, -69.9425),
("SKMD", 6.22, -75.5905555556),
("SKMG", 9.28333333333, -74.8394444444),
("SKMR", 8.82361111111, -75.8258333333),
("SKMU", 1.25361111111, -70.2336111111),
("SKMZ", 5.02972222222, -75.465),
("SKNV", 2.95, -75.2938888889),
("SKOC", 8.31472222222, -73.3583333333),
("SKOT", 7.01027777778, -74.7152777778),
("SKPB", 12.2213888889, -71.9847222222),
("SKPC", 6.18444444444, -67.4930555556),
("SKPE", 4.8125, -75.7394444444),
("SKPI", 1.85833333333, -76.0858333333),
("SKPP", 2.45416666667, -76.61),
("SKPS", 1.39638888889, -77.2908333333),
("SKPV", 13.3569444444, -81.3583333333),
("SKQU", 5.2125, -74.8836111111),
("SKRG", 6.16444444444, -75.4230555556),
("SKRH", 11.5261111111, -72.9258333333),
("SKSA", 6.95805555556, -71.855),
("SKSJ", 2.57944444444, -72.6391666667),
("SKSM", 11.1194444444, -74.2305555556),
("SKSP", 12.5833333333, -81.7111111111),
("SKSV", 2.15194444444, -74.7661111111),
("SKTD", 5.43027777778, -71.6580555556),
("SKTM", 6.45083333333, -71.76),
("SKTU", 8.07444444444, -76.7411111111),
("SKUC", 7.06861111111, -70.7366666667),
("SKUI", 5.69055555556, -76.6411111111),
("SKUL", 4.08833333333, -76.235),
("SKVP", 10.435, -73.2494444444),
("SKVV", 4.16777777778, -73.6136111111),
("SKYP", 5.31888888889, -72.3838888889),
("SLAP", -14.7394444444, -68.4108333333),
("SLAS", -15.7191666667, -63.0922222222),
("SLBJ", -22.7694444444, -64.315),
("SLCA", -20.0069444444, -63.5275),
("SLCB", -17.4208333333, -66.1769444444),
("SLCH", -16.99, -65.1413888889),
("SLCN", -17.5938888889, -69.4341666667),
("SLCO", -11.0402777778, -68.7827777778),
("SLCP", -16.1433333333, -62.0258333333),
("SLGY", -10.8216666667, -65.3458333333),
("SLJO", -13.0527777778, -64.6616666667),
("SLLP", -16.5130555556, -68.1922222222),
("SLMG", -13.2536111111, -64.0627777778),
("SLOR", -17.9633333333, -67.0761111111),
("SLPO", -19.5433333333, -65.7238888889),
("SLPS", -18.9752777778, -57.8205555556),
("SLRA", -13.2636111111, -64.6052777778),
("SLRB", -18.3280555556, -59.7661111111),
("SLRI", -11.0102777778, -66.0733333333),
("SLRQ", -14.4283333333, -67.5013888889),
("SLRY", -14.3061111111, -67.3536111111),
("SLSA", -13.7619444444, -65.435),
("SLSB", -14.8575, -66.7375),
("SLSI", -16.3844444444, -60.9627777778),
("SLSM", -14.9655555556, -65.6336111111),
("SLSU", -19.0069444444, -65.2888888889),
("SLTI", -16.3386111111, -58.4016666667),
("SLTJ", -21.5555555556, -64.7008333333),
("SLTR", -14.8194444444, -64.9183333333),
("SLVR", -17.6447222222, -63.1352777778),
("SMJP", 5.45277777778, -55.1877777778),
("SMZO", 5.81083333333, -55.1905555556),
("SOCA", 4.81972222222, -52.3602777778),
("SOOG", 3.8975, -51.8038888889),
("SPAC", -4.6075, -77.9408333333),
("SPAS", -2.795, -76.4661111111),
("SPAY", -10.7288888889, -73.7663888889),
("SPBB", -6.01888888889, -76.9883333333),
("SPBC", -3.91666666667, -70.5080555556),
("SPBR", -11.4113888889, -69.4886111111),
("SPCL", -8.37777777778, -74.5741666667),
("SPEO", -9.15083333333, -78.5236111111),
("SPEP", -9.76805555556, -70.7063888889),
("SPEQ", -17.1788888889, -70.9305555556),
("SPGM", -9.29, -76.0058333333),
("SPHI", -6.78722222222, -79.8280555556),
("SPHO", -13.1547222222, -74.2041666667),
("SPHY", -13.7063888889, -73.3502777778),
("SPHZ", -9.34722222222, -77.5983333333),
("SPIM", -12.0216666667, -77.1141666667),
("SPJI", -7.16944444444, -76.7277777778),
("SPJJ", -11.7830555556, -75.4733333333),
("SPJL", -15.4669444444, -70.1580555556),
("SPJN", -15.3575, -75.1355555556),
("SPJR", -7.13638888889, -78.49),
("SPLO", -17.695, -71.3438888889),
("SPLP", -12.1605555556, -76.9988888889),
("SPME", -3.5525, -80.3808333333),
("SPMS", -5.89388888889, -76.1183333333),
("SPNC", -9.87805555556, -76.2041666667),
("SPOL", -11.9286111111, -77.0611111111),
("SPPY", -6.20166666667, -77.8558333333),
("SPQT", -3.78472222222, -73.3086111111),
("SPQU", -16.3408333333, -71.5830555556),
("SPRM", -11.1286111111, -75.3502777778),
("SPRU", -8.08472222222, -79.1094444444),
("SPSO", -13.7447222222, -76.2202777778),
("SPST", -6.50861111111, -76.3730555556),
("SPTN", -18.0533333333, -70.2758333333),
("SPTP", -4.54972222222, -81.2238888889),
("SPTU", -12.6136111111, -69.2288888889),
("SPUR", -5.20555555556, -80.6163888889),
("SPYL", -4.57638888889, -81.2538888889),
("SPZA", -14.8538888889, -74.9613888889),
("SPZO", -13.5355555556, -71.9386111111),
("SUAA", -34.7891666667, -56.2644444444),
("SUAG", -30.4005555556, -56.5077777778),
("SUCA", -34.4563888889, -57.7705555556),
("SUDU", -33.3586111111, -56.4991666667),
("SULS", -34.855, -55.0941666667),
("SUMO", -32.3377777778, -54.2166666667),
("SUMU", -34.8377777778, -56.0302777778),
("SUPE", -34.9136111111, -54.9205555556),
("SUPU", -32.365, -58.0611111111),
("SURV", -30.9744444444, -55.4761111111),
("SUSO", -31.4397222222, -57.9905555556),
("SUTB", -31.7488888889, -55.9255555556),
("SVAC", 9.55333333333, -69.2377777778),
("SVAN", 9.42916666667, -64.4708333333),
("SVAT", 4.05166666667, -67.7008333333),
("SVBC", 10.1069444444, -64.6888888889),
("SVBI", 8.61944444444, -70.2205555556),
("SVBL", 10.1833333333, -67.5572222222),
("SVBM", 10.0425, -69.3583333333),
("SVBS", 10.2497222222, -67.6491666667),
("SVCB", 8.12194444444, -63.5369444444),
("SVCD", 7.62583333333, -66.1647222222),
("SVCJ", 9.64888888889, -68.5752777778),
("SVCL", 8.92444444444, -67.4169444444),
("SVCN", 6.23194444444, -62.8541666667),
("SVCO", 10.1755555556, -70.065),
("SVCP", 10.66, -63.2616666667),
("SVCR", 11.4147222222, -69.6808333333),
("SVCS", 10.2861111111, -66.8158333333),
("SVCU", 10.4502777778, -64.1302777778),
("SVCZ", 9.37194444444, -66.9227777778),
("SVED", 6.715, -61.6388888889),
("SVEZ", 7.05972222222, -69.4966666667),
("SVGD", 7.21083333333, -70.7563888889),
("SVGI", 10.5738888889, -62.3125),
("SVGU", 9.02694444444, -69.755),
("SVHG", 10.4622222222, -66.0925),
("SVIE", 10.7941666667, -63.9813888889),
("SVJC", 11.7808333333, -70.1516666667),
("SVJM", 9.90694444444, -67.3794444444),
("SVLF", 8.23916666667, -72.2708333333),
("SVLO", 11.8086111111, -66.1791666667),
("SVMC", 10.5580555556, -71.7277777778),
("SVMD", 8.58194444444, -71.1608333333),
("SVMG", 10.9125, -63.9663888889),
("SVMI", 10.6030555556, -66.9905555556),
("SVMT", 9.74916666667, -63.1522222222),
("SVPA", 5.61972222222, -67.6058333333),
("SVPC", 10.4802777778, -68.0727777778),
("SVPM", 7.80111111111, -72.2027777778),
("SVPR", 8.28833333333, -62.7602777778),
("SVPT", 7.57555555556, -70.1741666667),
("SVSA", 7.85222222222, -72.4347222222),
("SVSB", 7.80333333333, -71.1655555556),
("SVSO", 7.565, -72.035),
("SVSP", 10.2786111111, -68.755),
("SVSR", 7.8825, -67.4436111111),
("SVST", 8.945, -64.1508333333),
("SVSZ", 8.97444444444, -71.9430555556),
("SVTC", 9.08888888889, -62.0941666667),
("SVTM", 7.24916666667, -61.5288888889),
("SVVA", 10.1580555556, -67.9266666667),
("SVVG", 8.62416666667, -71.6727777778),
("SVVL", 9.34055555556, -70.5838888889),
("SVVP", 9.22194444444, -65.9933333333),
("SYKM", 5.86527777778, -60.6141666667),
("SYLD", 5.96583333333, -58.2702777778),
("SYLT", 3.3725, -59.7891666667),
("TAPA", 17.1366666667, -61.7925),
("TBPB", 13.0744444444, -59.4922222222),
("TDCF", 15.3366666667, -61.3919444444),
("TDPD", 15.5469444444, -61.3),
("TFFF", 14.5908333333, -61.0030555556),
("TFFG", 18.0997222222, -63.0469444444),
("TFFJ", 17.9, -62.85),
("TFFM", 15.8686111111, -61.27),
("TFFR", 16.2652777778, -61.5316666667),
("TGPY", 12.0041666667, -61.7861111111),
("TIST", 18.3372222222, -64.9733333333),
("TISX", 17.7016666667, -64.7983333333),
("TJBQ", 18.4947222222, -67.1294444444),
("TJFA", 18.3088888889, -65.6616666667),
("TJIG", 18.4566666667, -66.0980555556),
("TJMZ", 18.2555555556, -67.1483333333),
("TJNR", 18.245, -65.6433333333),
("TJPS", 18.0080555556, -66.5627777778),
("TJSJ", 18.4391666667, -66.0016666667),
("TKPK", 17.3111111111, -62.7186111111),
("TKPN", 17.2055555556, -62.5897222222),
("TLPC", 14.02, -60.9927777778),
("TLPL", 13.7330555556, -60.9525),
("TNCA", 12.5013888889, -70.015),
("TNCB", 12.1308333333, -68.2683333333),
("TNCC", 12.1886111111, -68.9597222222),
("TNCE", 17.4963888889, -62.9791666667),
("TNCM", 18.0408333333, -63.1088888889),
("TQPF", 18.2047222222, -63.055),
("TRPM", 33.5869444444, -80.2086111111),
("TTCP", 11.1494444444, -60.8319444444),
("TTPP", 10.5952777778, -61.3372222222),
("TUPJ", 18.4447222222, -64.5427777778),
("TVSB", 12.9883333333, -61.2619444444),
("TVSC", 12.6988888889, -61.3422222222),
("TVSM", 12.8875, -61.18),
("TVSV", 13.1441666667, -61.2108333333),
("UAAA", 43.3519444444, 77.0402777778),
("UAAH", 46.8941666667, 75.0047222222),
("UACC", 51.0222222222, 71.4672222222),
("UAFM", 43.0616666667, 74.4783333333),
("UAFO", 40.6088888889, 72.7930555556),
("UAII", 42.3644444444, 69.4791666667),
("UAKD", 47.7083333333, 67.7333333333),
("UARR", 51.1516666667, 51.5455555556),
("UASS", 50.3511111111, 80.2341666667),
("UATE", 43.86, 51.0919444444),
("UATT", 50.245, 57.2033333333),
("UAUU", 53.2063888889, 63.5508333333),
("UBBB", 40.4675, 50.0466666667),
("UEEA", 34.8041666667, -96.6711111111),
("UEEE", 62.0930555556, 129.770555556),
("UERP", 66.4166666667, 112.05),
("UGEE", 40.1483333333, 44.3966666667),
("UGGG", 41.6691666667, 44.9547222222),
("UGSS", 42.8580555556, 41.1280555556),
("UHBB", 50.4216666667, 127.41),
("UHHH", 48.5277777778, 135.188333333),
("UHMA", 64.7347222222, 177.741388889),
("UHMD", 64.38, -173.246666667),
("UHMM", 59.91, 150.716666667),
("UHMP", 67.5, 171.0),
("UHPP", 53.1663888889, 158.452777778),
("UHSH", 53.515, 142.888333333),
("UHSS", 46.8886111111, 142.717222222),
("UHWW", 43.3988888889, 132.151388889),
("UIAA", 52.0261111111, 113.305),
("UIBB", 56.3708333333, 101.698611111),
("UIII", 52.2669444444, 104.394722222),
("UIUU", 51.8066666667, 107.438055556),
("UKBB", 50.345, 30.895),
("UKCC", 48.0733333333, 37.74),
("UKDD", 48.3566666667, 35.1016666667),
("UKFF", 45.0386111111, 33.9830555556),
("UKHH", 49.3588888889, 26.9338888889),
("UKKK", 50.4016666667, 30.4516666667),
("UKLL", 49.8125, 23.9561111111),
("UKLN", 48.26, 25.9816666667),
("UKOO", 46.4269444444, 30.6780555556),
("ULLI", 59.8, 30.265),
("ULMM", 68.7816666667, 32.7505555556),
("UMGG", 52.5269444444, 31.0166666667),
("UMII", 55.1666666667, 30.1333333333),
("UMKK", 54.89, 20.5925),
("UMMM", 53.8644444444, 27.5394444444),
("UMMS", 53.8822222222, 28.0305555556),
("UNAA", 53.74, 91.385),
("UNBB", 53.3638888889, 83.5419444444),
("UNEE", 55.2697222222, 86.1069444444),
("UNOO", 54.9669444444, 73.31),
("URKK", 45.035, 39.1716666667),
("URML", 42.8166666667, 47.6522222222),
("URMM", 44.225, 43.0816666667),
("URMT", 45.1091666667, 42.1127777778),
("URRR", 47.2580555556, 39.8177777778),
("URSS", 43.4458333333, 39.9475),
("URWA", 46.2833333333, 48.0061111111),
("URWW", 48.7822222222, 44.3447222222),
("USCC", 55.3033333333, 61.5066666667),
("USCM", 53.3933333333, 58.76),
("USDD", 66.59, 66.6102777778),
("USNN", 60.95, 76.4666666667),
("USPP", 57.9166666667, 56.0255555556),
("USRR", 61.25, 73.5),
("USSS", 56.7413888889, 60.8036111111),
("USTO", 37.1322222222, -92.0838888889),
("USUU", 55.475, 65.4147222222),
("UTAA", 37.9916666667, 58.3633333333),
("UTAK", 40.0333333333, 52.9833333333),
("UTAV", 39.0833333333, 63.6133333333),
("UTDD", 38.5433333333, 68.825),
("UTNN", 42.4863888889, 59.6225),
("UTSB", 39.775, 64.48),
("UTSS", 39.7005555556, 66.9847222222),
("UTST", 37.2863888889, 67.3083333333),
("UTTT", 41.2572222222, 69.2816666667),
("UUBP", 53.2141666667, 34.1763888889),
("UUEE", 55.9716666667, 37.415),
("UUEM", 56.8247222222, 35.7575),
("UUOO", 51.8141666667, 39.2297222222),
("UUWW", 55.5913888889, 37.2613888889),
("UUYY", 61.6475, 50.8455555556),
("UWKD", 55.6077777778, 49.2772222222),
("UWOO", 51.7955555556, 55.4566666667),
("UWPP", 28.9541666667, -98.5197222222),
("UWUU", 54.5575, 55.8741666667),
("UWWW", 53.5052777778, 50.1644444444),
("VAAH", 23.0747222222, 72.6316666667),
("VAAK", 20.6988888889, 77.0563888889),
("VAAU", 19.8630555556, 75.3980555556),
("VABB", 19.0886111111, 72.8677777778),
("VABI", 21.9886111111, 82.1111111111),
("VABJ", 23.2877777778, 69.67),
("VABM", 15.8591666667, 74.6180555556),
("VABO", 22.3361111111, 73.2261111111),
("VABP", 23.285, 77.3372222222),
("VABV", 21.7519444444, 72.185),
("VADN", 20.435, 72.8436111111),
("VADS", 24.2677777778, 72.2052777778),
("VAGO", 15.3805555556, 73.8330555556),
("VAID", 22.7216666667, 75.8008333333),
("VAJB", 23.1777777778, 80.0519444444),
("VAJJ", 19.0980555556, 72.8338888889),
("VAJM", 22.4663888889, 70.0113888889),
("VAKE", 23.1125, 70.1002777778),
("VAKJ", 24.8194444444, 79.9191666667),
("VAKP", 16.6638888889, 74.2880555556),
("VAKS", 21.3166666667, 70.27),
("VANP", 21.0919444444, 79.0469444444),
("VANR", 19.9625, 73.8069444444),
("VAPO", 18.5819444444, 73.9194444444),
("VAPR", 21.6494444444, 69.6563888889),
("VARK", 22.3091666667, 70.7794444444),
("VARP", 21.1802777778, 81.7386111111),
("VASL", 17.6277777778, 75.9347222222),
("VASU", 21.115, 72.7427777778),
("VAUD", 24.6175, 73.9127777778),
("VCBI", 7.18111111111, 79.8836111111),
("VCCA", 8.30111111111, 80.4280555556),
("VCCB", 7.70555555556, 81.6777777778),
("VCCC", 6.82194444444, 79.8861111111),
("VCCG", 7.3375, 81.6255555556),
("VCCJ", 9.79222222222, 80.07),
("VCCT", 8.53888888889, 81.1813888889),
("VCCW", 6.25444444444, 81.235),
("VDBG", 13.0955555556, 103.224166667),
("VDKH", 12.255, 104.563611111),
("VDPP", 11.5463888889, 104.843888889),
("VDSR", 13.4105555556, 103.812777778),
("VDST", 13.5313888889, 106.014166667),
("VEAN", 28.1747222222, 94.8019444444),
("VEAT", 23.8888888889, 91.2408333333),
("VEAZ", 23.7461111111, 92.8030555556),
("VEBD", 26.6813888889, 88.3280555556),
("VEBG", 25.2608333333, 88.7955555556),
("VEBK", 23.6433333333, 86.1486111111),
("VEBS", 20.2441666667, 85.8177777778),
("VECC", 22.6547222222, 88.4466666667),
("VECO", 26.3302777778, 89.4669444444),
("VEDB", 23.8338888889, 86.425),
("VEDZ", 27.9888888889, 94.2233333333),
("VEGK", 26.7394444444, 83.4494444444),
("VEGT", 26.1061111111, 91.5847222222),
("VEGY", 24.7480555556, 84.9425),
("VEHK", 21.5791666667, 84.0061111111),
("VEIM", 24.7597222222, 93.8969444444),
("VEJH", 21.9133333333, 84.0502777778),
("VEJP", 18.8797222222, 82.5519444444),
("VEJS", 22.8119444444, 86.1675),
("VEJT", 26.7305555556, 94.1755555556),
("VEKM", 24.135, 91.8105555556),
("VEKR", 24.3083333333, 92.0075),
("VEKU", 24.9127777778, 92.9786111111),
("VELR", 27.2905555556, 94.0966666667),
("VEMN", 27.4833333333, 95.0175),
("VEMZ", 26.12, 85.3130555556),
("VENP", 20.87, 82.5194444444),
("VEPG", 28.0661111111, 95.3355555556),
("VEPH", 23.4744444444, 87.4277777778),
("VEPT", 25.5905555556, 85.0877777778),
("VEPU", 25.76, 87.4091666667),
("VERC", 23.3147222222, 85.3213888889),
("VERK", 22.2563888889, 84.8144444444),
("VEUK", 20.0975, 83.1833333333),
("VEZO", 27.5880555556, 93.8283333333),
("VGCB", 21.4519444444, 91.9641666667),
("VGEG", 22.2494444444, 91.8130555556),
("VGIS", 24.1525, 89.0494444444),
("VGJR", 23.1836111111, 89.1608333333),
("VGRJ", 24.4369444444, 88.6163888889),
("VGSD", 25.7591666667, 88.9086111111),
("VGSY", 24.9627777778, 91.8672222222),
("VGTJ", 23.7783333333, 90.3825),
("VGZR", 23.8433333333, 90.3977777778),
("VHHH", 22.3088888889, 113.914444444),
("VHSK", 22.4363888889, 114.080277778),
("VIAG", 27.1555555556, 77.9608333333),
("VIAL", 25.4388888889, 81.7341666667),
("VIAR", 31.7075, 74.7991666667),
("VIBK", 28.0725, 73.2066666667),
("VIBL", 26.9883333333, 80.8930555556),
("VIBN", 25.4519444444, 82.8588888889),
("VIBR", 31.8816666667, 77.1844444444),
("VIBT", 30.27, 74.7555555556),
("VIBW", 28.8375, 76.1775),
("VIBY", 28.4222222222, 79.4497222222),
("VICG", 30.6733333333, 76.7883333333),
("VICX", 26.4041666667, 80.41),
("VIDD", 28.5841666667, 77.2052777778),
("VIDN", 30.1872222222, 78.18),
("VIDP", 28.5663888889, 77.1030555556),
("VIGN", 24.6544444444, 77.3472222222),
("VIGR", 26.2938888889, 78.2275),
("VIHR", 29.1805555556, 75.7530555556),
("VIJN", 25.4897222222, 78.5594444444),
("VIJO", 26.2513888889, 73.0480555556),
("VIJP", 26.8238888889, 75.8097222222),
("VIJR", 26.8891666667, 70.8644444444),
("VIJU", 32.6897222222, 74.8380555556),
("VIKA", 26.4411111111, 80.3633333333),
("VIKO", 25.1605555556, 75.8447222222),
("VILD", 30.8544444444, 75.9511111111),
("VILH", 34.1355555556, 77.5455555556),
("VILK", 26.7605555556, 80.8863888889),
("VIPK", 32.2336111111, 75.6344444444),
("VIPL", 30.315, 76.3633333333),
("VIPT", 29.0330555556, 79.4736111111),
("VIRB", 26.2502777778, 81.3805555556),
("VISM", 31.0816666667, 77.0580555556),
("VISP", 29.9941666667, 77.4241666667),
("VISR", 33.9866666667, 74.7736111111),
("VLHS", 20.2572222222, 100.437222222),
("VLLB", 19.8972222222, 102.160833333),
("VLPS", 15.1319444444, 105.781388889),
("VLPV", 19.4547222222, 103.218055556),
("VLSK", 16.5563888889, 104.759444444),
("VLVT", 17.9880555556, 102.563055556),
("VMMC", 22.1494444444, 113.591388889),
("VNBW", 27.5055555556, 83.4161111111),
("VNCG", 26.5705555556, 88.0794444444),
("VNJP", 26.7086111111, 85.9222222222),
("VNKT", 27.6963888889, 85.3591666667),
("VNNG", 28.1036111111, 81.6669444444),
("VNPK", 28.2002777778, 83.9811111111),
("VNSI", 27.1594444444, 84.98),
("VNVT", 26.4833333333, 87.2636111111),
("VOAT", 10.8233333333, 72.1755555556),
("VOBG", 12.9497222222, 77.6680555556),
("VOBR", 17.9077777778, 77.4858333333),
("VOBZ", 16.5302777778, 80.7966666667),
("VOCB", 11.0313888889, 77.0438888889),
("VOCL", 11.1355555556, 75.9547222222),
("VOCP", 14.5097222222, 78.7727777778),
("VOCX", 9.15305555556, 92.8191666667),
("VODG", 17.6291666667, 78.4033333333),
("VOHY", 17.4522222222, 78.4611111111),
("VOMD", 9.83444444444, 78.0933333333),
("VOML", 12.96, 74.8925),
("VOMM", 12.9941666667, 80.1802777778),
("VONS", 16.5408333333, 79.3177777778),
("VOPB", 11.6455555556, 92.7330555556),
("VORY", 17.1094444444, 81.8183333333),
("VOSM", 11.7819444444, 78.0652777778),
("VOTJ", 10.7197222222, 79.1036111111),
("VOTP", 13.6319444444, 79.5430555556),
("VOTR", 10.7652777778, 78.7088888889),
("VOTV", 8.48194444444, 76.9180555556),
("VOTX", 12.9066666667, 80.1211111111),
("VOWA", 17.9180555556, 79.5986111111),
("VQPR", 27.4030555556, 89.4258333333),
("VRMM", 4.19166666667, 73.5288888889),
("VTBD", 13.9125, 100.606666667),
("VTBK", 14.1019444444, 99.9169444444),
("VTBL", 14.8744444444, 100.663333333),
("VTBU", 12.6797222222, 101.005),
("VTBW", 13.7686111111, 102.315277778),
("VTCB", 19.4972222222, 100.285555556),
("VTCL", 18.2719444444, 99.5038888889),
("VTCP", 18.1319444444, 100.164444444),
("VTPH", 12.6338888889, 99.9508333333),
("VTPI", 15.2772222222, 100.295833333),
("VTPL", 16.8208333333, 101.253888889),
("VTPM", 16.6997222222, 98.545),
("VTPN", 15.6727777778, 100.136666667),
("VTPP", 16.7827777778, 100.278888889),
("VTPU", 17.6736111111, 100.234722222),
("VTPY", 17.2341666667, 99.0577777778),
("VTSC", 6.51972222222, 101.743333333),
("VTSG", 8.09888888889, 98.9861111111),
("VTSH", 7.18638888889, 100.607777778),
("VTSK", 6.78527777778, 101.153333333),
("VTSM", 9.54777777778, 100.062222222),
("VTSN", 8.47111111111, 99.9555555556),
("VTSP", 8.11305555556, 98.3166666667),
("VTSR", 9.7775, 98.5852777778),
("VTSS", 6.93305555556, 100.392777778),
("VTST", 7.50861111111, 99.6163888889),
("VTSY", 6.52666666667, 101.241944444),
("VTUD", 17.3863888889, 102.788055556),
("VTUI", 17.195, 104.118611111),
("VTUJ", 14.8691666667, 103.488888889),
("VTUL", 17.4388888889, 101.721944444),
("VTUN", 14.9341666667, 102.078611111),
("VTUP", 17.4125, 104.777222222),
("VTUW", 17.3836111111, 104.642777778),
("VVDN", 16.0438888889, 108.199166667),
("VVNB", 21.2216666667, 105.805555556),
("VVNT", 12.2180555556, 109.2),
("VVTS", 10.82, 106.661666667),
("VYBG", 21.1819444444, 94.9291666667),
("VYBM", 24.2688888889, 97.2461111111),
("VYCI", 14.1413888889, 93.3683333333),
("VYDW", 14.0980555556, 98.2016666667),
("VYHH", 20.7430555556, 96.7916666667),
("VYKG", 21.3005555556, 99.6366666667),
("VYKL", 23.1886111111, 94.0508333333),
("VYKP", 19.4266666667, 93.5344444444),
("VYKT", 10.0491666667, 98.5377777778),
("VYLK", 19.6922222222, 97.2147222222),
("VYLS", 22.9775, 97.7522222222),
("VYLY", 20.9402777778, 94.8225),
("VYMD", 21.7019444444, 95.9777777778),
("VYME", 12.4433333333, 98.6211111111),
("VYMK", 25.3825, 97.3527777778),
("VYMM", 16.4447222222, 97.6605555556),
("VYMO", 23.0925, 96.645),
("VYMS", 20.5166666667, 99.2566666667),
("VYNP", 25.3541666667, 97.295),
("VYNS", 20.89, 97.7361111111),
("VYPA", 16.8944444444, 97.6752777778),
("VYPN", 16.8127777778, 94.7752777778),
("VYPT", 27.3297222222, 97.4261111111),
("VYPY", 18.8244444444, 95.2658333333),
("VYST", 20.9416666667, 95.9144444444),
("VYSW", 20.1325, 92.8725),
("VYTD", 18.4605555556, 94.2994444444),
("VYTL", 20.4836111111, 99.9352777778),
("VYTO", 19.0311111111, 96.4013888889),
("VYYY", 16.9072222222, 96.1330555556),
("WAAA", -5.06166666667, 119.553888889),
("WAAL", -4.31666666667, 122.466666667),
("WAAM", -2.55777777778, 120.324166667),
("WAAS", -2.52944444444, 121.357222222),
("WAAT", -3.045, 119.821666667),
("WAAU", -4.08222222222, 122.416666667),
("WABB", -1.19, 136.107777778),
("WABI", -3.36805555556, 135.496388889),
("WABP", -4.52805555556, 136.887222222),
("WAJJ", -2.57694444444, 140.516111111),
("WAJW", -4.09611111111, 138.9525),
("WAKK", -8.52027777778, 140.418333333),
("WAMG", 0.636666666667, 122.851944444),
("WAMH", 3.68305555556, 125.527777778),
("WAML", -0.918333333333, 119.909444444),
("WAMM", 1.54916666667, 124.926388889),
("WAMP", -1.41666666667, 120.6575),
("WAMT", 0.831944444444, 127.380555556),
("WAMW", -1.03888888889, 122.771666667),
("WAPL", -5.66138888889, 132.731388889),
("WAPP", -3.70888888889, 128.09),
("WASK", -3.64388888889, 133.695277778),
("WASR", -0.88, 134.050277778),
("WASS", -0.925833333333, 131.12),
("WBGB", 3.17222222222, 113.044444444),
("WBGG", 1.48472222222, 110.346388889),
("WBGM", 4.17805555556, 114.331388889),
("WBGR", 4.325, 113.988333333),
("WBGS", 2.26388888889, 111.982777778),
("WBKD", 5.03222222222, 118.323888889),
("WBKK", 5.93722222222, 116.051111111),
("WBKL", 5.30055555556, 115.25),
("WBKW", 4.31333333333, 118.121944444),
("WBSB", 4.94555555556, 114.927777778),
("WIAA", 5.87388888889, 95.3394444444),
("WIAM", -7.34555555556, 108.246388889),
("WIAR", -7.61583333333, 111.433888889),
("WIAS", -7.92611111111, 112.713888889),
("WIBB", 0.461111111111, 101.444444444),
("WIBD", 1.60916666667, 101.433333333),
("WIIA", -6.29305555556, 106.569722222),
("WIIB", -6.90055555556, 107.576111111),
("WIIC", -6.75583333333, 108.539444444),
("WIIH", -6.26638888889, 106.891111111),
("WIII", -6.12555555556, 106.655833333),
("WIIJ", -7.78805555556, 110.431944444),
("WIIL", -7.645, 109.033888889),
("WIIP", -6.33694444444, 106.764444444),
("WIIS", -6.97305555556, 110.375277778),
("WIIT", -5.24222222222, 105.178888889),
("WIKB", 1.12083333333, 104.118611111),
("WIKD", -2.74555555556, 107.754722222),
("WIKK", -2.16277777778, 106.139166667),
("WIKN", 0.9225, 104.532222222),
("WIKS", -0.479166666667, 104.579166667),
("WIMB", 1.16638888889, 97.7027777778),
("WIME", 1.39916666667, 99.4319444444),
("WIMG", -0.876111111111, 100.3525),
("WIMM", 3.55805555556, 98.6716666667),
("WIMS", 1.55472222222, 98.8902777778),
("WIOG", -0.348611111111, 111.7475),
("WIOK", -1.81638888889, 109.963333333),
("WION", 3.90861111111, 108.387777778),
("WIOO", -0.150555555556, 109.403888889),
("WIOP", 0.835833333333, 112.935555556),
("WIOS", 0.0636111111111, 111.474722222),
("WIPA", -1.63777777778, 103.644166667),
("WIPL", -3.86388888889, 102.340833333),
("WIPP", -2.89777777778, 104.701111111),
("WIPQ", -3.28583333333, 103.879444444),
("WIPR", -0.352777777778, 102.334722222),
("WIPU", -2.54222222222, 101.088333333),
("WITL", 5.06944444444, 97.2591666667),
("WITM", 5.22666666667, 96.9502777778),
("WITT", 5.52333333333, 95.4202777778),
("WMAP", 2.04138888889, 103.307222222),
("WMBT", 2.81805555556, 104.16),
("WMKA", 6.19388888889, 100.4025),
("WMKB", 5.46583333333, 100.391111111),
("WMKC", 6.16638888889, 102.293611111),
("WMKD", 3.77527777778, 103.208888889),
("WMKE", 4.53722222222, 103.426388889),
("WMKF", 3.11222222222, 101.7025),
("WMKI", 4.56777777778, 101.091944444),
("WMKJ", 1.64111111111, 103.669444444),
("WMKK", 2.74555555556, 101.709722222),
("WMKL", 6.32972222222, 99.7286111111),
("WMKM", 2.26333333333, 102.251388889),
("WMKN", 5.3825, 103.103333333),
("WMKP", 5.29694444444, 100.276666667),
("WPDB", -9.30305555556, 125.286666667),
("WPEC", -8.48527777778, 126.398888889),
("WRBB", -3.44194444444, 114.761111111),
("WRBC", -3.4125, 115.995555556),
("WRBI", -2.705, 111.673055556),
("WRBP", -2.225, 113.9425),
("WRBS", -2.50111111111, 112.976944444),
("WRKC", -8.64055555556, 122.236666667),
("WRKE", -8.84888888889, 121.661944444),
("WRKG", -8.59555555556, 120.4775),
("WRKK", -10.1713888889, 123.671111111),
("WRKO", -8.48611111111, 119.889166667),
("WRLK", 2.15444444444, 117.432222222),
("WRLL", -1.26805555556, 116.894444444),
("WRLR", 3.32638888889, 117.566388889),
("WRLS", -0.484444444444, 117.156944444),
("WRLT", -0.0930555555556, 117.439166667),
("WRRA", -8.56055555556, 116.094444444),
("WRRB", -8.53944444444, 118.687222222),
("WRRR", -8.74833333333, 115.167222222),
("WRRS", -8.48888888889, 117.411944444),
("WRRT", -9.40944444444, 119.246111111),
("WRRW", -9.67, 120.303611111),
("WRSJ", -7.37972222222, 112.786666667),
("WRSQ", -7.51583333333, 110.756666667),
("WSAG", 1.42361111111, 103.811388889),
("WSAP", 1.36027777778, 103.909444444),
("WSAT", 1.38722222222, 103.708611111),
("WSSL", 1.41694444444, 103.8675),
("WSSS", 1.35555555556, 103.987222222),
("YBAF", -27.5702777778, 153.008055556),
("YBAM", -10.9508333333, 142.459444444),
("YBAS", -23.8069444444, 133.902222222),
("YBBN", -27.3841666667, 153.1175),
("YBCG", -28.1644444444, 153.504722222),
("YBCS", -16.8858333333, 145.755277778),
("YBCV", -26.4133333333, 146.2625),
("YBMA", -20.6638888889, 139.488611111),
("YBMC", -26.6033333333, 153.091111111),
("YBMK", -21.1716666667, 149.179722222),
("YBOK", -27.4113888889, 151.735277778),
("YBPN", -20.495, 148.552222222),
("YBRK", -23.3819444444, 150.475277778),
("YBTL", -19.2525, 146.765277778),
("YBWP", -12.6786111111, 141.925277778),
("YMAV", -38.0394444444, 144.469444444),
("YMAY", -36.0677777778, 146.958055556),
("YMEN", -37.7280555556, 144.901944444),
("YMES", -38.0988888889, 147.149444444),
("YMHB", -42.8361111111, 147.510277778),
("YMLT", -41.5452777778, 147.214166667),
("YMMB", -37.9758333333, 145.102222222),
("YMML", -37.6733333333, 144.843333333),
("YMPC", -37.9322222222, 144.753333333),
("YPAD", -34.945, 138.530555556),
("YPAG", -32.5069444444, 137.716666667),
("YPED", -34.7025, 138.620833333),
("YPJT", -32.0972222222, 115.881111111),
("YPKA", -20.7122222222, 116.773333333),
("YPKG", -30.7894444444, 121.461666667),
("YPKU", -15.7780555556, 128.7075),
("YPLM", -22.2355555556, 114.088611111),
("YPPD", -20.3777777778, 118.626388889),
("YPPF", -34.7933333333, 138.633055556),
("YPPH", -31.9402777778, 115.966944444),
("YPWR", -31.1441666667, 136.816944444),
("YPXM", -10.4505555556, 105.690277778),
("YSBK", -33.9244444444, 150.988333333),
("YSCB", -35.3083333333, 149.193888889),
("YSCH", -30.3205555556, 153.116388889),
("YSCN", -34.0402777778, 150.687222222),
("YSDU", -32.2166666667, 148.574722222),
("YSNF", -29.0413888889, 167.938611111),
("YSRI", -33.6005555556, 150.780833333),
("YSSY", -33.9461111111, 151.177222222),
("YSTW", -31.0838888889, 150.846666667),
("YSWG", -35.1652777778, 147.466388889),
("ZBAA", 40.08, 116.584444444),
("ZBHH", 40.8533333333, 111.821666667),
("ZBLA", 49.205, 119.825),
("ZBSJ", 38.2805555556, 114.696388889),
("ZBTJ", 39.1238888889, 117.346111111),
("ZBYN", 37.7466666667, 112.628611111),
("ZGGG", 23.1841666667, 113.265833333),
("ZGHA", 28.1888888889, 113.219444444),
("ZGKL", 25.2177777778, 110.039166667),
("ZGNN", 22.6080555556, 108.172222222),
("ZGOW", 23.4, 116.683333333),
("ZGSZ", 22.6394444444, 113.812222222),
("ZHCC", 34.5194444444, 113.840833333),
("ZHHH", 30.7836111111, 114.208055556),
("ZHYC", 30.5522222222, 111.468611111),
("ZKPY", 39.0333333333, 125.783333333),
("ZLLL", 36.5166666667, 103.621666667),
("ZLXY", 34.4458333333, 108.7525),
("ZMUB", 47.8430555556, 106.766388889),
("ZPJH", 21.975, 100.76),
("ZPPP", 24.9922222222, 102.743333333),
("ZSAM", 24.5438888889, 118.1275),
("ZSCN", 28.6, 115.916666667),
("ZSFZ", 25.9333333333, 119.661666667),
("ZSHC", 30.2283333333, 120.431666667),
("ZSNB", 29.8247222222, 121.465),
("ZSNJ", 31.74, 118.86),
("ZSOF", 31.78, 117.298333333),
("ZSQD", 36.2625, 120.375277778),
("ZSSS", 31.1977777778, 121.336111111),
("ZSYT", 37.4016666667, 121.371666667),
("ZUCK", 29.7188888889, 106.641388889),
("ZUUU", 30.5783333333, 103.946944444),
("ZUXC", 27.9886111111, 102.184166667),
("ZWSH", 39.5433333333, 76.0216666667),
("ZWTN", 37.0377777778, 79.8658333333),
("ZWWW", 43.9069444444, 87.4741666667),
("ZYHB", 45.6233333333, 126.250277778),
("ZYJM", 46.8433333333, 130.465277778),
("ZYMD", 44.5238888889, 129.568888889),
("ZYTL", 38.9655555556, 121.538333333),
("ZYYJ", 42.8816666667, 129.448333333)
)
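# A minimal lookup sketch (illustrative only; the tuple above is bound to a
# name defined earlier in this file -- `AIRPORTS` is used here purely as a
# hypothetical placeholder for that name):
#
#   coords = {icao: (lat, lon) for icao, lat, lon in AIRPORTS}
#   coords.get("ZYYJ")  # -> (42.8816666667, 129.448333333)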
|
NeoMahler/rc-vikidia
|
icao.py
|
Python
|
mit
| 180,282
|
[
"FEFF"
] |
73e2309227401ef20f9014448325a4fc7283fda925798adbbb68751d2a8e1d6c
|
# -*- coding: utf-8 -*-
"""OpenERP addons manifest aka __openerp__.py checker"""
import os
try:
import ast
from ast import NodeVisitor
except ImportError:
from flake8.util import ast
from ast import NodeVisitor
from common_checker.base_checker import BaseChecker
AGPL = 'AGPL-3'
ALLOWED_KEYS = ['name', 'version', 'author',
'maintainer', 'category', 'complexity',
'depends', 'description', 'website',
'summary', 'external_dependencies',
'data', 'demo', 'test',
'installable', 'auto_install',
'license', 'application',
'images', 'icon', 'web',
'js', 'css', 'qweb']
class OpenERPManifestChecker(BaseChecker, ast.NodeVisitor):
"""ast.NodeVisitor subclass that check root ast.node.
It checks class validity
Please take look at ast.Node visitor for more information
about visit/visitor behavior
"""
O600 = 'Warning unknown Manifest key (\'%s\')'
O601 = 'Manifest "name" key is missing'
O602 = 'Manifest "description" key is missing'
O603 = 'Manifest "license" key is missing'
O604 = 'Manifest license should be %s' % AGPL
O605 = 'Manifest author key is missing'
O606 = 'Manifest version key is missing'
O607 = 'Manifest version is incorrect'
O608 = 'Manifest website key is missing'
O609 = 'Manifest init key is deprecated use data'
O610 = 'Manifest update key is deprecated use data'
O611 = 'Manifest application key is missing'
O612 = 'Manifest complexity key is missing'
O613 = ('Manifest complexity key is invalid it should be'
' easy, normal, or expert')
O614 = 'Manifest data key is missing'
O615 = 'Manifest data key should be a list'
O616 = 'Manifest depends key should be a list'
O617 = 'Manifest installable key is set to False'
O618 = 'Manifest installable key should be a boolean'
O619 = 'Manifest is not a valid RST'
def make_error_tuple(self, code, node, *str_format):
"""Make an error tuple used by flake8
        Uses the input code to find the corresponding property lookup
        :param code: string of the code number; must be set as a property
:param node: ast node source of error
:param str_format: optional arguments for string formatting
:returns: (line number, col, text, type)
"""
code_text = ('%s %s' % (code, getattr(self, code))) % str_format
return (node.lineno, node.col_offset, code_text, type(self))
def generic_visit(self, node):
"""Refer to Python ast NodeVisitor documentation"""
        # We also have to be compatible with editors that
        # create temp files
        # TODO: find a better way to ensure that this is a manifest file
if '__openerp__' in os.path.basename(self.filename):
return ast.NodeVisitor.generic_visit(self, node)
def make_dict(self, node):
"""Transforms node to real Python dict
:param node: ast.node representing a dict
        :returns: an evaluated dict
"""
res = ast.literal_eval(node)
if not isinstance(res, dict):
raise ValueError('Manifest node is not a dict')
return res
def ensure_key(self, node, manifest_dict, keyname, code):
"""Check if key is present"""
if keyname not in manifest_dict:
self.errors.append(self.make_error_tuple(code, node))
def check_allowed_keys(self, node, manifest_dict):
"""Ensure code 600
Check if manifest dict has a unknown key
"""
for key in manifest_dict:
if key not in ALLOWED_KEYS:
faulty_node = next(x for x in node.keys
if ast.literal_eval(x) == key)
self.errors.append(self.make_error_tuple('O600', faulty_node,
key))
    def get_nodes_from_key(self, dict_node, lk_key):
        """Return the (key, value) ast nodes matching a given literal key"""
        for index, key in enumerate(dict_node.keys):
            if ast.literal_eval(key) == lk_key:
                return (key, dict_node.values[index])
        return None, None
def check_license_value(self, node, manifest_dict):
"""Check if license is AGPL, if it exists"""
        # Default to AGPL since we don't need to
        # report a bad license if it doesn't exist;
        # instead it will be reported by O603
if manifest_dict.get('license', AGPL) != AGPL:
key, val = self.get_nodes_from_key(node, 'license')
self.errors.append(self.make_error_tuple('O604', val))
def visit_Dict(self, node):
"""Visits and validate orm.Model definition"""
manifest_dict = self.make_dict(node)
self.check_allowed_keys(node, manifest_dict)
self.ensure_key(node, manifest_dict, 'name', 'O601')
self.ensure_key(node, manifest_dict, 'description', 'O602')
self.ensure_key(node, manifest_dict, 'license', 'O603')
self.check_license_value(node, manifest_dict)
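# A minimal __openerp__.py manifest that would satisfy the checks above
# (illustrative only; all values are placeholders):
#
# {
#     'name': 'My Module',
#     'version': '1.0',
#     'author': 'Someone',
#     'website': 'http://example.com',
#     'description': 'A long RST description of the module',
#     'license': 'AGPL-3',
#     'data': ['views/my_view.xml'],
# }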
|
nbessi/openerp-conventions
|
v7/manifest_checker.py
|
Python
|
mit
| 5,069
|
[
"VisIt"
] |
5bd61bb0bed11416664835795d94904f7b8663d24dcde20909e5f04cd330837a
|
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2020-2021 Dominik Kriegner <dominik.kriegner@gmail.com>
# Copyright (C) 2020 Mike Moron <mike.moron@tu-dortmund.de>
import matplotlib.pyplot as plt
import numpy as np
import xrayutilities as xu
# the script below currently only works for Amorphous materials.
# file an issue on github if you need/want this for Crystal objects
# create a fictitious LayerStack with thin, rough Layers to illustrate the
# difference between the slicing approach and the usual layered approach
Si = xu.materials.Amorphous('Si', 2285)
SiO2 = xu.materials.Amorphous('SiO2', 1000)
C = xu.materials.Amorphous('C', 800)
s = xu.simpack.Layer(Si, np.inf, roughness=20)
l1 = xu.simpack.Layer(SiO2, 35, roughness=15)
l2 = xu.simpack.Layer(C, 15, roughness=8)
ls = s + l1 + l2
# conventional X-ray reflectivity modelling
m = xu.simpack.SpecularReflectivityModel(ls)
pos, eldens, layer_eldens = m.densityprofile(500, individual_layers=True)
# slice the layer stack into Amorphous sublayers, one every 0.1 angstrom.
# at the top a vacuum layer is added
sls = xu.simpack.effectiveDensitySlicing(ls, 0.1)
ms = xu.simpack.SpecularReflectivityModel(sls)
spos, seldens = ms.densityprofile(500)
# perform simulation and plot simulation and density profile
alpha = np.linspace(0., 5., num=500)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogy(alpha, m.simulate(alpha), label='conventional XRR')
ax.semilogy(alpha, ms.simulate(alpha), label='sliced XRR')
ax.set_xlabel(r'incidence angle (deg)')
ax.set_ylabel(r'reflectivity')
ax.legend()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(pos, eldens, '.-', label='conventional')
for i in range(len(layer_eldens)):
ax.plot(pos, layer_eldens[i], ':')
ax.plot(spos, seldens, '.-', label='sliced')
ax.legend()
ax.set_xlabel(r'z-position ($\AA$)')
ax.set_ylabel(r'electron density')
plt.show()
|
dkriegner/xrayutilities
|
examples/simpack_xrr_effectivedensityslicing.py
|
Python
|
gpl-2.0
| 2,533
|
[
"CRYSTAL"
] |
c5278d8c7814947dab3dbaba1cef1cd5d1b50d38edf03d77443b258c15058fff
|
""" The TimeLeft utility allows to calculate the amount of CPU time
left for a given batch system slot. This is essential for the 'Filling
Mode' where several VO jobs may be executed in the same allocated slot.
The prerequisites for the utility to run are:
- Plugin for extracting information from local batch system
- Scale factor for the local site.
With this information the utility can calculate in normalized units the
CPU time remaining for a given slot.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import shlex
import DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import systemCall
class TimeLeft(object):
""" This generally does not run alone
"""
def __init__(self):
""" Standard constructor
"""
self.log = gLogger.getSubLogger('TimeLeft')
    # This is the ratio of the SpecInt published by the site to 250 (the reference used for matching)
self.scaleFactor = gConfig.getValue('/LocalSite/CPUScalingFactor', 0.0)
if not self.scaleFactor:
self.log.warn('/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName())
self.normFactor = gConfig.getValue('/LocalSite/CPUNormalizationFactor', 0.0)
if not self.normFactor:
self.log.warn('/LocalSite/CPUNormalizationFactor not defined for site %s' % DIRAC.siteName())
result = self.__getBatchSystemPlugin()
if result['OK']:
self.batchPlugin = result['Value']
else:
self.batchPlugin = None
self.batchError = result['Message']
def getScaledCPU(self, processors=1):
""" Returns the current CPU Time spend (according to batch system) scaled according
to /LocalSite/CPUScalingFactor
"""
# Quit if no scale factor available
if not self.scaleFactor:
return 0
# Quit if Plugin is not available
if not self.batchPlugin:
return 0
resourceDict = self.batchPlugin.getResourceUsage()
if 'Value' in resourceDict:
if resourceDict['Value'].get('CPU'):
return resourceDict['Value']['CPU'] * self.scaleFactor
elif resourceDict['Value'].get('WallClock'):
# When CPU value missing, guess from WallClock and number of processors
return resourceDict['Value']['WallClock'] * self.scaleFactor * processors
return 0
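  # Worked example (illustrative numbers): if the batch system reports
  # CPU=3600 seconds and the site publishes CPUScalingFactor=12.5, then
  # getScaledCPU() returns 3600 * 12.5 = 45000 scaled units.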
def getTimeLeft(self, cpuConsumed=0.0, processors=1):
""" Returns the CPU Time Left for supported batch systems.
The CPUConsumed is the current raw total CPU.
"""
# Quit if no scale factor available
if not self.scaleFactor:
return S_ERROR('/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName())
if not self.batchPlugin:
return S_ERROR(self.batchError)
resourceDict = self.batchPlugin.getResourceUsage()
if not resourceDict['OK']:
self.log.warn('Could not determine timeleft for batch system at site %s' % DIRAC.siteName())
return resourceDict
resources = resourceDict['Value']
self.log.debug("self.batchPlugin.getResourceUsage(): %s" % str(resources))
if not resources.get('CPULimit') and not resources.get('WallClockLimit'):
# This should never happen
return S_ERROR('No CPU or WallClock limit obtained')
# if one of CPULimit or WallClockLimit is missing, compute a reasonable value
if not resources.get('CPULimit'):
resources['CPULimit'] = resources['WallClockLimit'] * processors
elif not resources.get('WallClockLimit'):
resources['WallClockLimit'] = resources['CPULimit'] / processors
# if one of CPU or WallClock is missing, compute a reasonable value
if not resources.get('CPU'):
resources['CPU'] = resources['WallClock'] * processors
elif not resources.get('WallClock'):
resources['WallClock'] = resources['CPU'] / processors
cpu = float(resources['CPU'])
cpuLimit = float(resources['CPULimit'])
wallClock = float(resources['WallClock'])
wallClockLimit = float(resources['WallClockLimit'])
batchSystemTimeUnit = resources.get('Unit', 'Both')
# Some batch systems rely on wall clock time and/or cpu time to make allocations
if batchSystemTimeUnit == 'WallClock':
time = wallClock
timeLimit = wallClockLimit
else:
time = cpu
timeLimit = cpuLimit
if time and cpuConsumed > 3600. and self.normFactor:
# If there has been more than 1 hour of consumed CPU and
# there is a Normalization set for the current CPU
# use that value to renormalize the values returned by the batch system
# NOTE: cpuConsumed is non-zero for call by the JobAgent and 0 for call by the watchdog
# cpuLimit and cpu may be in the units of the batch system, not real seconds...
# (in this case the other case won't work)
# therefore renormalise it using cpuConsumed (which is in real seconds)
cpuWorkLeft = (timeLimit - time) * self.normFactor * cpuConsumed / time
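      # Worked example (editor's note): if the batch system reports
      # time = 40000 and timeLimit = 100000 in batch-system units, with
      # normFactor = 10 and cpuConsumed = 20000 real seconds, this gives
      # (100000 - 40000) * 10 * 20000 / 40000 = 300000 normalized units;
      # the measured real CPU rescales the (possibly non-second) batch units.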
elif self.normFactor:
# FIXME: this is always used by the watchdog... Also used by the JobAgent
# if consumed less than 1 hour of CPU
# It was using self.scaleFactor but this is inconsistent: use the same as above
# In case the returned cpu and cpuLimit are not in real seconds, this is however rubbish
cpuWorkLeft = (timeLimit - time) * self.normFactor
else:
# Last resort recovery...
cpuWorkLeft = (timeLimit - time) * self.scaleFactor
    self.log.verbose('Remaining CPU in normalized units is: %.02f' % cpuWorkLeft)
return S_OK(cpuWorkLeft)
def __getBatchSystemPlugin(self):
""" Using the name of the batch system plugin, will return an instance of the plugin class.
"""
batchSystems = {
'LSF': 'LSB_JOBID',
'PBS': 'PBS_JOBID',
'BQS': 'QSUB_REQNAME',
'SGE': 'SGE_TASK_ID',
'SLURM': 'SLURM_JOB_ID',
'HTCondor': '_CONDOR_JOB_AD'} # more to be added later
name = None
for batchSystem, envVar in batchSystems.items():
if envVar in os.environ:
name = batchSystem
break
if name is None and 'MACHINEFEATURES' in os.environ and 'JOBFEATURES' in os.environ:
# Only use MJF if legacy batch system information not available for now
name = 'MJF'
if name is None:
self.log.warn('Batch system type for site %s is not currently supported' % DIRAC.siteName())
return S_ERROR('Current batch system is not supported')
self.log.debug('Creating plugin for %s batch system' % (name))
try:
batchSystemName = "%sResourceUsage" % (name)
batchPlugin = __import__('DIRAC.Resources.Computing.BatchSystems.TimeLeft.%s' % # pylint: disable=unused-variable
batchSystemName, globals(), locals(), [batchSystemName])
except ImportError as x:
msg = 'Could not import DIRAC.Resources.Computing.BatchSystems.TimeLeft.%s' % (batchSystemName)
self.log.warn(x)
self.log.warn(msg)
return S_ERROR(msg)
try:
      batchInstance = getattr(batchPlugin, batchSystemName)()
except Exception as x: # pylint: disable=broad-except
msg = 'Could not instantiate %s()' % (batchSystemName)
self.log.warn(x)
self.log.warn(msg)
return S_ERROR(msg)
return S_OK(batchInstance)
#############################################################################
def runCommand(cmd, timeout=120):
""" Wrapper around systemCall to return S_OK(stdout) or S_ERROR(message)
"""
result = systemCall(timeout=timeout, cmdSeq=shlex.split(cmd))
if not result['OK']:
return result
status, stdout, stderr = result['Value'][0:3]
if status:
gLogger.warn('Status %s while executing %s' % (status, cmd))
gLogger.warn(stderr)
if stdout:
return S_ERROR(stdout)
if stderr:
return S_ERROR(stderr)
return S_ERROR('Status %s while executing %s' % (status, cmd))
else:
return S_OK(str(stdout))
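# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Assumes a configured DIRAC installation where /LocalSite/CPUScalingFactor and
# /LocalSite/CPUNormalizationFactor are set and a supported batch system
# variable (e.g. SLURM_JOB_ID) is present in the environment.
if __name__ == '__main__':
  tl = TimeLeft()
  print('Scaled CPU used so far: %s' % tl.getScaledCPU(processors=4))
  res = tl.getTimeLeft(cpuConsumed=7200.0, processors=4)
  if res['OK']:
    print('Normalized CPU work left: %.2f' % res['Value'])
  else:
    print('Cannot determine time left: %s' % res['Message'])
  print(runCommand('echo hello'))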
|
yujikato/DIRAC
|
src/DIRAC/Resources/Computing/BatchSystems/TimeLeft/TimeLeft.py
|
Python
|
gpl-3.0
| 8,046
|
[
"DIRAC"
] |
b423af86c57d5e87fc9cf963665d106e1a2c80b4aaf76ca49ee7d4dc616fe485
|
##############################################################################
##############################################################################
# Default settings and helpers for
# Maximum-likelihood inference
#
# Copyright (c) 2016 Johan Dahlin
# liu (at) johandahlin.com
#
##############################################################################
##############################################################################
import numpy as np
import pandas
import os
##############################################################################
# Set default settings if needed
##############################################################################
def setSettings(ml, vers):
    if (ml.initPar is None):
raise NameError(
"ml-opt (initPar): no initial parameters given, exiting...")
if (ml.dataset == None):
ml.dataset = 0
print("ml-opt (dataset): no number of data set given, defaulting to " +
str(ml.dataset) + ".")
if (ml.filePrefix == None):
ml.filePrefix = "model"
print("ml-opt (filePrefix): no short name for model given, defaulting to " +
str(ml.filePrefix) + ".")
#=====================================================================
# Settings for the SPSA algorithm
#=====================================================================
if (vers == "spsa"):
if (ml.verbose == None):
print("ml-spsa (verbose): defaulting to verbose algorithm.")
ml.verbose = True
if (ml.tolLevel == None):
ml.tolLevel = 1e-6
print("ml-spsa (tolLevel): defaulting to " +
str(ml.tolLevel) + " as tolerance level.")
if (ml.noisyTolLevel == None):
ml.noisyTolLevel = 5
print("ml-spsa (noisyTolLevel): defaulting to checking tolerance level over the last " +
str(ml.noisyTolLevel) + " iterations.")
if (ml.maxIter == None):
ml.maxIter = 100
print("ml-spsa (maxIter): defaulting to " +
str(ml.maxIter) + " as maximum no iterations.")
if (ml.alpha == None):
ml.alpha = 0.602
print("ml-spsa (alpha): defaulting to alpha: " +
str(ml.alpha) + " as suggested in Spall(1998).")
if (ml.gamma == None):
ml.gamma = 0.101
print("ml-spsa (gamma): defaulting to gamma: " +
str(ml.gamma) + " as suggested in Spall(1998).")
if (ml.A == None):
ml.A = 0.10 * ml.maxIter
print("ml-spsa (A): defaulting to A: " + str(ml.A) +
" as 10% of maxIter as suggested in Spall(1998).")
#=====================================================================
# Settings for the GPO algorithm
#=====================================================================
if (vers == "gpo"):
if (ml.maxIter == None):
ml.maxIter = 100
print("gpo (maxIter): defaulting to " +
str(ml.maxIter) + " as maximum no iterations.")
if (ml.verbose == None):
print("gpo (verbose): defaulting to verbose algorithm.")
ml.verbose = True
if (ml.tolLevel == None):
ml.tolLevel = 1e-3
print("gpo (tolLevel): defaulting to " +
str(ml.tolLevel) + " as tolerance level for AQ.")
if (ml.epsilon == None):
ml.epsilon = 1e-2
print("gpo (epsilon): defaulting to " +
str(ml.epsilon) + " as epsilon.")
if (ml.preIter == None):
ml.preIter = 50
print("gpo (preIter): defaulting to " + str(ml.preIter) +
" pre-iterations to estimate hyperparameters.")
        if (ml.upperBounds is None):
raise NameError(
"gpo (upperBounds): no upper parameter bounds (upperBounds) given.")
        if (ml.lowerBounds is None):
raise NameError(
"gpo (lowerBounds): no lower parameter bounds (lowerBounds) given.")
if (ml.EstimateHyperparametersInterval == None):
ml.EstimateHyperparametersInterval = 10000
print("gpo (EstimateHyperparametersInterval): defaulting to not updating hyperparameters for pre-iterations.")
if (ml.AQfunction == None):
ml.AQfunction = ml.aq_ei
print(
"gpo (AQfunction): defaulting using expected improvement (EI) as aquisition function.")
if (ml.EstimateThHatEveryIteration == None):
ml.EstimateThHatEveryIteration = True
print("gpo (EstimateThHatEveryIteration): defaulting to estimate parameters at every iteration (set EstimateThHatEveryIteration to FALSE for speedup).")
if (ml.jitterParameters == None):
ml.jitterParameters = True
print("gpo (jitterParameters): defaulting to jitter parameters.")
        if ((ml.jitteringCovariance is None) and (ml.jitterParameters == True)):
tmp = 0.01
ml.jitteringCovariance = tmp * np.diag(np.ones(ml.nPars))
print("gpo (jitteringCovariance): defaulting to jitter parameters with Gaussian noise with variance " + str(tmp) + ".")
if (ml.preSamplingMethod == None):
ml.preSamplingMethod = "latinHyperCube"
print("gpo (preSamplingMethod): defaulting to: latinHyperCube during the pre-iterations (alternatives: sobol, uniform).")
##########################################################################
# Helper: compile the results and write to file
##########################################################################
def writeToFile_helper(ml, sm=None, fileOutName=None, noLLests=False):
# Set file name from parameter
    if ((ml.fileOutName is not None) and (fileOutName is None)):
fileOutName = ml.fileOutName
# Construct the columns labels
if (noLLests):
columnlabels = [None] * (ml.nPars + 1)
else:
columnlabels = [None] * (ml.nPars + 3)
for ii in range(0, ml.nPars):
columnlabels[ii] = "th" + str(ii)
columnlabels[ml.nPars] = "step"
if (noLLests == False):
columnlabels[ml.nPars + 1] = "diffLogLikelihood"
columnlabels[ml.nPars + 2] = "logLikelihood"
# Compile the results for output
if (noLLests):
out = np.hstack((ml.th, ml.step))
else:
out = np.hstack((ml.th, ml.step, ml.llDiff, ml.ll))
# Write out the results to file
fileOut = pandas.DataFrame(out, columns=columnlabels)
if (fileOutName == None):
if (sm.filterType == "sPF"):
fileOutName = 'results/' + str(ml.filePrefix) + '/' + str(
ml.optMethod) + '_' + sm.filterType + '_N' + str(sm.nPart) + '/' + str(ml.dataset) + '.csv'
elif (sm.filterType == "kf"):
fileOutName = 'results/' + str(ml.filePrefix) + '/' + str(
ml.optMethod) + '_' + sm.filterType + '/' + str(ml.dataset) + '.csv'
else:
fileOutName = 'results/' + str(ml.filePrefix) + '/' + str(ml.optMethod) + '_' + sm.filterType + \
'_' + sm.smootherType + '_N' + \
str(sm.nPart) + '/' + str(ml.dataset) + '.csv'
ensure_dir(fileOutName)
fileOut.to_csv(fileOutName)
print("writeToFile_helper: wrote results to file: " + fileOutName)
##############################################################################
# Calculate the pdf of a univariate Gaussian
##############################################################################
def uninormpdf(x, mu, sigma):
return 1.0 / np.sqrt(2.0 * np.pi * sigma**2) * np.exp(- 0.5 * (x - mu)**2 * sigma**(-2))
##############################################################################
# Calculate the log-pdf of a univariate Gaussian
##############################################################################
def loguninormpdf(x, mu, sigma):
return -0.5 * np.log(2.0 * np.pi * sigma**2) - 0.5 * (x - mu)**2 * sigma**(-2)
##############################################################################
# Calculate the log-pdf of a multivariate Gaussian with mean vector mu and covariance matrix S
##############################################################################
def lognormpdf(x, mu, S):
nx = len(S)
norm_coeff = nx * np.log(2.0 * np.pi) + np.linalg.slogdet(S)[1]
err = x - mu
numerator = np.dot(np.dot(err, np.linalg.pinv(S)), err.transpose())
return -0.5 * (norm_coeff + numerator)
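# Quick sanity check (editor's sketch, assuming scipy is available): for the
# standard 2-d Gaussian these helpers agree with scipy.stats, e.g.
#   from scipy.stats import multivariate_normal
#   x, mu, S = np.zeros(2), np.zeros(2), np.eye(2)
#   np.allclose(lognormpdf(x, mu, S),
#               multivariate_normal.logpdf(x, mu, S))   # -> True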
##############################################################################
# Check if a matrix is positive definite by checking that all eigenvalues are positive
##############################################################################
def isPSD(x):
return np.all(np.linalg.eigvals(x) > 0)
##############################################################################
# Calculate vector Inf-norm
##############################################################################
def vecnorm(x):
return np.amax(np.abs(x))
##############################################################################
# Check if dirs for outputs exists, otherwise create them
##############################################################################
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
##############################################################################
##############################################################################
# End of file
##############################################################################
##############################################################################
|
compops/gpo-abc2015
|
para/ml_helpers.py
|
Python
|
mit
| 9,739
|
[
"Gaussian"
] |
690b288a754ee960e152e57d0f3a21bc3f5a9bc14712dcada8b4a598d8c2289f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from CTFd.models import Users, Solves, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Teams
from CTFd import create_app
from random import randint
import datetime
import random
import hashlib
import os
import sys
app = create_app()
USER_AMOUNT = 50
TEAM_AMOUNT = 10
CHAL_AMOUNT = 20
categories = [
'Exploitation',
'Reversing',
'Web',
'Forensics',
'Scripting',
'Cryptography',
'Networking',
]
lorems = [
'Lorem', 'ipsum', 'dolor', 'sit', 'amet,', 'consectetur', 'adipiscing', 'elit.',
'Proin', 'fringilla', 'elit', 'velit,', 'sed', 'scelerisque', 'tellus', 'dapibus',
'vel.', 'Aenean', 'at', 'urna', 'porta,', 'fringilla', 'erat', 'eget,',
'lobortis', 'quam.', 'Praesent', 'luctus,', 'quam', 'at', 'consequat', 'luctus,',
'mauris', 'sem', 'pretium', 'metus,', 'eu', 'viverra', 'dui', 'leo',
'in', 'tortor.', 'Cras', 'iaculis', 'enim', 'erat,', 'sed', 'gravida',
'velit', 'consectetur', 'a.', 'Duis', 'eget', 'fermentum', 'elit.', 'Vivamus',
'laoreet', 'elementum', 'massa,', 'ut', 'sodales', 'mi', 'gravida', 'at.',
'Vivamus', 'dignissim', 'in', 'eros', 'non', 'iaculis.', 'Vivamus', 'nec',
'sem', 'fringilla,', 'semper', 'lectus', 'in,', 'malesuada', 'tellus.', 'Vestibulum',
'mattis', 'commodo', 'enim', 'sit', 'amet', 'scelerisque.', 'Proin', 'at',
'condimentum', 'nisi,', 'nec', 'fringilla', 'ante.', 'Vestibulum', 'sit', 'amet',
'neque', 'sit', 'amet', 'elit', 'placerat', 'interdum', 'egestas', 'ac',
'malesuada', 'quis', 'arcu', 'ac', 'blandit.', 'Vivamus', 'in', 'massa',
'a', 'purus', 'bibendum', 'sagittis.', 'Nunc', 'venenatis', 'lacus', 'sed',
'nulla', 'dapibus,', 'consequat', 'laoreet', 'nisi', 'faucibus.', 'Nam', 'consequat',
'viverra', 'nibh', 'a', 'cursus.', 'Phasellus', 'tristique', 'justo', 'vitae',
'rutrum', 'pharetra.', 'Sed', 'sed', 'porttitor', 'lacus.', 'Nam', 'ornare',
'sit', 'amet', 'nisi', 'imperdiet', 'vulputate.', 'Maecenas', 'hendrerit', 'ullamcorper',
'elit,', 'sed', 'pellentesque', 'lacus', 'bibendum', 'sit', 'amet.', 'Aliquam',
'consectetur', 'odio', 'quis', 'tellus', 'ornare,', 'id', 'malesuada', 'dui',
'rhoncus.', 'Quisque', 'fringilla', 'pellentesque', 'nulla', 'id', 'congue.', 'Nulla',
'ultricies', 'dolor', 'tristique', 'facilisis', 'at', 'accumsan', 'nisi.', 'Praesent',
'commodo,', 'mauris', 'sit', 'amet', 'placerat', 'condimentum,', 'nibh', 'leo',
'pulvinar', 'justo,', 'vel', 'dignissim', 'mi', 'dolor', 'et', 'est.',
'Nulla', 'facilisi.', 'Sed', 'nunc', 'est,', 'lobortis', 'id', 'diam',
'nec,', 'vulputate', 'varius', 'orci.', 'Maecenas', 'iaculis', 'vehicula', 'eros',
'eu', 'congue.', 'Nam', 'tempor', 'commodo', 'lobortis.', 'Donec', 'eget',
'posuere', 'dolor,', 'ut', 'rhoncus', 'tortor.', 'Donec', 'et', 'quam',
'quis', 'urna', 'rhoncus', 'fermentum', 'et', 'ut', 'tellus.', 'Aliquam',
'erat', 'volutpat.', 'Morbi', 'porttitor', 'ante', 'nec', 'porta', 'mollis.',
'Ut', 'sodales', 'pellentesque', 'rutrum.', 'Nullam', 'elit', 'eros,', 'sollicitudin',
'ac', 'rutrum', 'sit', 'amet,', 'eleifend', 'vel', 'nulla.', 'Morbi',
'quis', 'lacinia', 'nisi.', 'Integer', 'at', 'neque', 'vel', 'velit',
'tincidunt', 'elementum', 'lobortis', 'sit', 'amet', 'tellus.', 'Nunc', 'volutpat',
'diam', 'ac', 'diam', 'lacinia,', 'id', 'molestie', 'quam', 'eu',
'ultricies', 'ligula.', 'Duis', 'iaculis', 'massa', 'massa,', 'eget', 'venenatis',
'dolor', 'fermentum', 'laoreet.', 'Nam', 'posuere,', 'erat', 'quis', 'tempor',
'consequat,', 'purus', 'erat', 'hendrerit', 'arcu,', 'nec', 'aliquam', 'ligula',
'augue', 'vitae', 'felis.', 'Vestibulum', 'tincidunt', 'ipsum', 'vel', 'pharetra',
'lacinia.', 'Quisque', 'dignissim,', 'arcu', 'non', 'feugiat', 'semper,', 'felis',
'est', 'commodo', 'lorem,', 'malesuada', 'elementum', 'nibh', 'lectus', 'porttitor',
'nisi.', 'Duis', 'non', 'lacinia', 'nisl.', 'Etiam', 'ante', 'nisl,',
'mattis', 'eget', 'convallis', 'vel,', 'ullamcorper', 'ac', 'nisl.', 'Duis',
'eu', 'massa', 'at', 'urna', 'laoreet', 'convallis.', 'Donec', 'tincidunt',
'sapien', 'sit', 'amet', 'varius', 'eu', 'dignissim', 'tortor,', 'elementum',
'gravida', 'eros.', 'Cras', 'viverra', 'accumsan', 'erat,', 'et', 'euismod',
'dui', 'placerat', 'ac.', 'Ut', 'tortor', 'arcu,', 'euismod', 'vitae',
'aliquam', 'in,', 'interdum', 'vitae', 'magna.', 'Vestibulum', 'leo', 'ante,',
'posuere', 'eget', 'est', 'non,', 'adipiscing', 'ultrices', 'erat.', 'Donec',
'suscipit', 'felis', 'molestie,', 'ultricies', 'dui', 'a,', 'facilisis', 'magna.',
'Cum', 'sociis', 'natoque', 'penatibus', 'et', 'magnis', 'dis', 'parturient',
'montes,', 'nascetur', 'ridiculus', 'mus.', 'Nulla', 'quis', 'odio', 'sit',
'amet', 'ante', 'tristique', 'accumsan', 'ut', 'iaculis', 'neque.', 'Vivamus',
'in', 'venenatis', 'enim.', 'Nunc', 'dignissim', 'justo', 'neque,', 'sed',
'ultricies', 'justo', 'dictum', 'in.', 'Nulla', 'eget', 'nunc', 'ac',
'arcu', 'vestibulum', 'bibendum', 'vitae', 'quis', 'tellus.', 'Morbi', 'bibendum,',
'quam', 'ac', 'cursus', 'posuere,', 'purus', 'lectus', 'tempor', 'est,',
'eu', 'iaculis', 'quam', 'enim', 'a', 'nibh.', 'Etiam', 'consequat',
]
hipsters = [
'Ethnic', 'narwhal', 'pickled', 'Odd', 'Future', 'cliche', 'VHS', 'whatever',
'Etsy', 'American', 'Apparel', 'kitsch', 'wolf', 'mlkshk', 'fashion', 'axe',
'ethnic', 'banh', 'mi', 'cornhole', 'scenester', 'Echo', 'Park', 'Dreamcatcher',
'tofu', 'fap', 'selvage', 'authentic', 'cliche', 'High', 'Life', 'brunch',
'pork', 'belly', 'viral', 'XOXO', 'drinking', 'vinegar', 'bitters', 'Wayfarers',
'gastropub', 'dreamcatcher', 'chillwave', 'Shoreditch', 'kale', 'chips', 'swag', 'street',
'art', 'put', 'a', 'bird', 'on', 'it', 'Vice', 'synth',
'cliche', 'retro', 'Master', 'cleanse', 'ugh', 'Austin', 'slow-carb', 'small',
'batch', 'Hashtag', 'food', 'truck', 'deep', 'v', 'semiotics', 'chia',
'normcore', 'bicycle', 'rights', 'Austin', 'drinking', 'vinegar', 'hella', 'readymade',
'farm-to-table', 'Wes', 'Anderson', 'put', 'a', 'bird', 'on', 'it',
'freegan', 'Synth', 'lo-fi', 'food', 'truck', 'chambray', 'Shoreditch', 'cliche',
'kogiSynth', 'lo-fi', 'fap', 'single-origin', 'coffee', 'brunch', 'butcher', 'Pickled',
'Etsy', 'locavore', 'forage', 'pug', 'stumptown', 'occupy', 'PBR&B', 'actually',
'shabby', 'chic', 'church-key', 'disrupt', 'lomo', 'hoodie', 'Tumblr', 'biodiesel',
'Pinterest', 'butcher', 'Hella', 'Carles', 'pour-over', 'YOLO', 'VHS', 'literally',
'Selvage', 'narwhal', 'flexitarian', 'wayfarers', 'kitsch', 'bespoke', 'sriracha', 'Banh',
'mi', '8-bit', 'cornhole', 'viral', 'Tonx', 'keytar', 'gastropub', 'YOLO',
'hashtag', 'food', 'truck', '3', 'wolf', 'moonFingerstache', 'flexitarian', 'craft',
'beer', 'shabby', 'chic', '8-bit', 'try-hard', 'semiotics', 'Helvetica', 'keytar',
'PBR', 'four', 'loko', 'scenester', 'keytar', '3', 'wolf', 'moon',
'sriracha', 'gluten-free', 'literally', 'try-hard', 'put', 'a', 'bird', 'on',
'it', 'cornhole', 'blog', 'fanny', 'pack', 'Mumblecore', 'pickled', 'distillery',
'butcher', 'Ennui', 'tote', 'bag', 'letterpress', 'disrupt', 'keffiyeh', 'art',
'party', 'aesthetic', 'Helvetica', 'stumptown', 'Wes', 'Anderson', 'next', 'level',
"McSweeney's", 'cornhole', 'Schlitz', 'skateboard', 'pop-up', 'Chillwave', 'biodiesel', 'semiotics',
'seitan', 'authentic', 'bicycle', 'rights', 'wolf', 'pork', 'belly', 'letterpress',
'locavore', 'whatever', 'fixie', 'viral', 'mustache', 'beard', 'Hashtag', 'sustainable',
'lomo', 'cardigan', 'lo-fiWilliamsburg', 'craft', 'beer', 'bitters', 'iPhone', 'gastropub',
'messenger', 'bag', 'Organic', 'post-ironic', 'fingerstache', 'ennui', 'banh', 'mi',
'Art', 'party', 'bitters', 'twee', 'bespoke', 'church-key', 'Intelligentsia', 'sriracha',
'Echo', 'Park', 'Tofu', 'locavore', 'street', 'art', 'freegan', 'farm-to-table',
'distillery', 'hoodie', 'swag', 'ugh', 'YOLO', 'VHS', 'Cred', 'hella',
'readymade', 'distillery', 'Banh', 'mi', 'Echo', 'Park', "McSweeney's,", 'mlkshk',
'photo', 'booth', 'swag', 'Odd', 'Future', 'squid', 'Tonx', 'craft',
'beer', 'High', 'Life', 'tousled', 'PBR', 'you', 'probably', "haven't",
'heard', 'of', 'them', 'locavore', 'PBR&B', 'street', 'art', 'pop-up',
]
names = [
'James', 'John', 'Robert', 'Michael', 'William', 'David', 'Richard', 'Joseph',
'Charles', 'Thomas', 'Christopher', 'Daniel', 'Matthew', 'Donald', 'Anthony', 'Paul',
'Mark', 'George', 'Steven', 'Kenneth', 'Andrew', 'Edward', 'Brian', 'Joshua',
'Kevin', 'Ronald', 'Timothy', 'Jason', 'Jeffrey', 'Gary', 'Ryan', 'Nicholas',
'Eric', 'Stephen', 'Jacob', 'Larry', 'Frank', 'Jonathan', 'Scott', 'Justin',
'Raymond', 'Brandon', 'Gregory', 'Samuel', 'Patrick', 'Benjamin', 'Jack', 'Dennis',
'Jerry', 'Alexander', 'Tyler', 'Douglas', 'Henry', 'Peter', 'Walter', 'Aaron',
'Jose', 'Adam', 'Harold', 'Zachary', 'Nathan', 'Carl', 'Kyle', 'Arthur',
'Gerald', 'Lawrence', 'Roger', 'Albert', 'Keith', 'Jeremy', 'Terry', 'Joe',
'Sean', 'Willie', 'Jesse', 'Ralph', 'Billy', 'Austin', 'Bruce', 'Christian',
'Roy', 'Bryan', 'Eugene', 'Louis', 'Harry', 'Wayne', 'Ethan', 'Jordan',
'Russell', 'Alan', 'Philip', 'Randy', 'Juan', 'Howard', 'Vincent', 'Bobby',
'Dylan', 'Johnny', 'Phillip', 'Craig', 'Mary', 'Patricia', 'Elizabeth', 'Jennifer',
'Linda', 'Barbara', 'Susan', 'Margaret', 'Jessica', 'Dorothy', 'Sarah', 'Karen',
'Nancy', 'Betty', 'Lisa', 'Sandra', 'Helen', 'Donna', 'Ashley', 'Kimberly',
'Carol', 'Michelle', 'Amanda', 'Emily', 'Melissa', 'Laura', 'Deborah', 'Stephanie',
'Rebecca', 'Sharon', 'Cynthia', 'Ruth', 'Kathleen', 'Anna', 'Shirley', 'Amy',
'Angela', 'Virginia', 'Brenda', 'Pamela', 'Catherine', 'Katherine', 'Nicole', 'Christine',
'Janet', 'Debra', 'Carolyn', 'Samantha', 'Rachel', 'Heather', 'Maria', 'Diane',
'Frances', 'Joyce', 'Julie', 'Martha', 'Joan', 'Evelyn', 'Kelly', 'Christina',
'Emma', 'Lauren', 'Alice', 'Judith', 'Marie', 'Doris', 'Ann', 'Jean',
'Victoria', 'Cheryl', 'Megan', 'Kathryn', 'Andrea', 'Jacqueline', 'Gloria', 'Teresa',
'Janice', 'Sara', 'Rose', 'Julia', 'Hannah', 'Theresa', 'Judy', 'Mildred',
'Grace', 'Beverly', 'Denise', 'Marilyn', 'Amber', 'Danielle', 'Brittany', 'Diana',
'Jane', 'Lori', 'Olivia', 'Tiffany', 'Kathy', 'Tammy', 'Crystal', 'Madison',
]
emails = [
'@gmail.com',
'@yahoo.com',
'@outlook.com',
'@hotmail.com',
'@mailinator.com',
'@poly.edu',
'@nyu.edu'
]
extensions = [
'.doc', '.log', '.msg', '.rtf', '.txt', '.wpd', '.wps', '.123',
'.csv', '.dat', '.db ', '.dll', '.mdb', '.pps', '.ppt', '.sql',
'.wks', '.xls', '.xml', '.mng', '.pct', '.bmp', '.gif', '.jpe',
'.jpg', '.png', '.psd', '.psp', '.tif', '.ai ', '.drw', '.dxf',
'.eps', '.ps ', '.svg', '.3dm', '.3dm', '.ind', '.pdf', '.qxd',
'.qxp', '.aac', '.aif', '.iff', '.m3u', '.mid', '.mid', '.mp3',
'.mpa', '.ra ', '.ram', '.wav', '.wma', '.3gp', '.asf', '.asx',
'.avi', '.mov', '.mp4', '.mpg', '.qt ', '.rm ', '.swf', '.wmv',
'.asp', '.css', '.htm', '.htm', '.js ', '.jsp', '.php', '.xht',
'.fnt', '.fon', '.otf', '.ttf', '.8bi', '.plu', '.xll', '.cab',
'.cpl', '.cur', '.dmp', '.drv', '.key', '.lnk', '.sys', '.cfg',
'.ini', '.reg', '.app', '.bat', '.cgi', '.com', '.exe', '.pif',
'.vb ', '.ws ', '.deb', '.gz ', '.pkg', '.rar', '.sea', '.sit',
'.sit', '.zip', '.bin', '.hqx', '.0 E', '.mim', '.uue', '.cpp',
'.jav', '.pl ', '.bak', '.gho', '.old', '.ori', '.tmp', '.dmg',
'.iso', '.toa', '.vcd', '.gam', '.nes', '.rom', '.sav', '.msi',
]
def gen_sentence():
return ' '.join(random.sample(lorems, 50))
def gen_name():
return random.choice(names)
def gen_email():
return random.choice(emails)
def gen_category():
return random.choice(categories)
def gen_value():
return random.choice(range(100, 500, 50))
def gen_word():
return random.choice(hipsters)
def gen_file():
return gen_word() + random.choice(extensions)
def random_date(start, end):
return start + datetime.timedelta(
seconds=randint(0, int((end - start).total_seconds())))
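# Example (editor's note): random_date draws a uniformly distributed timestamp
# between two datetimes, e.g.
#   start = datetime.datetime(2017, 1, 1)
#   random_date(start, start + datetime.timedelta(days=1))  # a moment that day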
if __name__ == '__main__':
with app.app_context():
db = app.db
### Generating Challenges
print("GENERATING CHALLENGES")
for x in range(CHAL_AMOUNT):
word = gen_word()
flags = [{'flag': word, 'type': 0}]
db.session.add(Challenges(word, gen_sentence(), gen_value(), gen_category(), flags))
db.session.commit()
db.session.add(Keys(x + 1, word, 0))
db.session.commit()
### Generating Files
print("GENERATING FILES")
AMT_CHALS_WITH_FILES = int(CHAL_AMOUNT * (3.0 / 4.0))
for x in range(AMT_CHALS_WITH_FILES):
chal = random.randint(1, CHAL_AMOUNT)
filename = gen_file()
md5hash = hashlib.md5(filename).hexdigest()
db.session.add(Files(chal, os.path.join('static/uploads', md5hash, filename)))
db.session.commit()
### Generating Users
print("GENERATING USERS")
used = []
count = 0
while count < USER_AMOUNT:
name = gen_name()
if name not in used:
used.append(name)
                user = Users(name, name.lower() + gen_email(), 'password')
                user.verified = True
                db.session.add(user)
count += 1
db.session.commit()
### Generating Teams
print("GENERATING TEAMS")
used_names = []
used_users = []
count = 0
while count < TEAM_AMOUNT:
name = gen_word() + ' ' + gen_word()
user_id = random.randint(0, 50)
if name not in used_names and user_id not in used_users:
team = Teams(name, user_id)
db.session.add(team)
db.session.flush()
user = Users.query.filter_by(id=user_id).first()
user.teamid = team.id
used_users.append(user_id)
used_names.append(name)
count += 1
db.session.commit()
for user_id in range(1, 51):
user = Users.query.filter_by(id=user_id).first()
user.teamid = (user_id % 5) + 1
db.session.commit()
### Generating Solves
print("GENERATING SOLVES")
for x in range(USER_AMOUNT):
used = []
base_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=-10000)
for y in range(random.randint(1, CHAL_AMOUNT)):
chalid = random.randint(1, CHAL_AMOUNT)
if chalid not in used:
used.append(chalid)
solve = Solves(chalid, x + 1, '127.0.0.1', gen_word())
new_base = random_date(base_time, base_time + datetime.timedelta(minutes=random.randint(30, 60)))
solve.date = new_base
base_time = new_base
db.session.add(solve)
db.session.commit()
### Generating Wrong Keys
print("GENERATING WRONG KEYS")
for x in range(USER_AMOUNT):
used = []
base_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=-10000)
for y in range(random.randint(1, CHAL_AMOUNT * 20)):
chalid = random.randint(1, CHAL_AMOUNT)
if chalid not in used:
used.append(chalid)
wrong = WrongKeys(x + 1, chalid, gen_word())
new_base = random_date(base_time, base_time + datetime.timedelta(minutes=random.randint(30, 60)))
wrong.date = new_base
base_time = new_base
db.session.add(wrong)
db.session.commit()
db.session.close()
|
RITC3/RC3_CTFD
|
populate.py
|
Python
|
apache-2.0
| 16,078
|
[
"Amber",
"Brian",
"CRYSTAL"
] |
308a36dddb2ba07e63022e84cc97561aa953e2c0f19cba4780aca25c270c52b2
|
import os
import cmd
import sys
import shlex
import pprint
import argparse
import synapse.neuron as s_neuron
import synapse.eventbus as s_eventbus
import synapse.datamodel as s_datamodel
class Cmd(cmd.Cmd,s_eventbus.EventBus):
def __init__(self, neu):
cmd.Cmd.__init__(self)
self.prompt = 'neu> '
s_eventbus.EventBus.__init__(self)
self.neu = neu
self._cmd_print = True
moddef = self.neu.getModelDict()
self.model = s_datamodel.DataModel(model=moddef)
def getArgParser(self):
return argparse.ArgumentParser()
def banner(self):
'''
Print the initial hello/banner from the neuron.
'''
peer = self.neu.getPeerTufo()
name = peer[1].get('neuron:name','<unnamed>')
self.vprint('Connected To: %s (%s)' % (name,peer[0]))
def vprint(self, msg):
if self._cmd_print:
print(msg)
self.fire('cmd:print', msg=msg)
def do_set(self, line):
'''
Set a property on the current neuron.
Usage:
neu> set [options] <prop> <valu>
Options:
--force - Set an option which is *not* part of the data model
Example:
neu> set name "foo bar"
'''
pars = self.getArgParser()
pars.add_argument('--force', default=False, action='store_true', help='Set a non-datamodel property')
pars.add_argument('prop', help='property name')
pars.add_argument('valu', help='property value')
opts = pars.parse_args( shlex.split(line) )
fullprop = 'neuron:%s' % opts.prop
if self.model.getPropDef(fullprop) == None and not opts.force:
self.vprint('unknown neuron property: %s' % (opts.prop,))
return
try:
realvalu = self.model.getPropParse(fullprop,opts.valu)
        except Exception as e:
            self.vprint('Invalid Value: %s (%s)' % (opts.valu, e))
            return
self.neu.setNeuProp(opts.prop,realvalu)
self.vprint('%s -> %s' % (opts.prop, opts.valu))
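        # Example session (editor's note):
        #   neu> set name "foo bar"     -> parses via the data model, then sets
        #   neu> set --force magic 1    -> "magic" is a made-up property name;
        #                                  --force skips the data-model check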
def do_mesh(self, line):
'''
Pretty print the entire neuron mesh dictionary.
        Example:
            neu> mesh
'''
mesh = self.neu.getMeshDict()
outp = pprint.pformat(mesh)
self.vprint(outp)
def do_peers(self, line):
'''
List the known neuron peers.
Example:
            neu> peers
'''
mesh = self.neu.getMeshDict()
peers = list( mesh.get('peers',{}).values() )
if len(peers) == 0:
return self.vprint('no peers?!?!')
for peer in peers:
name = peer[1].get('neuron:name')
self.vprint('%s: %s' % (peer[0],name))
#def do_quit(self):
def main(argv):
'''
    A tool for initializing neuron options.
'''
p = argparse.ArgumentParser(prog='neutool')
p.add_argument('url', help='Neuron telepath URL')
opts = p.parse_args(argv)
neu = s_neuron.openurl( opts.url )
cli = Cmd(neu)
cli.banner()
cli.cmdloop()
neu.fini()
if __name__ == '__main__':
sys.exit( main( sys.argv[1:] ) )
|
imjonsnooow/synapse
|
synapse/tools/neutool.py
|
Python
|
apache-2.0
| 3,150
|
[
"NEURON"
] |
398f1a5cd5df91d30d2e151a16df152c114b1f74847c0cd428922778c3fa90c1
|
#!/usr/bin/env python
"""
Framework to start a simulated vehicle and connect it to MAVProxy.
Peter Barker, April 2016
based on sim_vehicle.sh by Andrew Tridgell, October 2011
"""
import atexit
import getpass
import optparse
import os
import os.path
import signal
import subprocess
import sys
import tempfile
import time
import shlex
# List of open terminal windows for macosx
windowID = []
class CompatError(Exception):
"""A custom exception class to hold state if we encounter the parse error we are looking for"""
def __init__(self, error, opts, rargs):
Exception.__init__(self, error)
self.opts = opts
self.rargs = rargs
class CompatOptionParser(optparse.OptionParser):
"""An option parser which emulates the behaviour of the old sim_vehicle.sh; if passed -C, the first argument not understood starts a list of arguments that are passed straight to mavproxy"""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
def error(self, error):
"""Override default error handler called by optparse.OptionParser.parse_args when a parse error occurs; raise a detailed exception which can be caught"""
if error.find("no such option") != -1:
raise CompatError(error, self.values, self.rargs)
optparse.OptionParser.error(self, error)
def parse_args(self, args=None, values=None):
"""Wrap parse_args so we can catch the exception raised upon discovering the known parameter parsing error"""
try:
            opts, args = optparse.OptionParser.parse_args(self, args, values)
except CompatError as e:
if not e.opts.sim_vehicle_sh_compatible:
print(e)
print("Perhaps you want --sim_vehicle_sh_compatible (-C)?")
sys.exit(1)
if e.opts.mavproxy_args:
print("--mavproxy-args not permitted in compat mode")
sys.exit(1)
args = []
opts = e.opts
mavproxy_args = [str(e)[16:]] # this trims "no such option" off
mavproxy_args.extend(e.rargs)
opts.ensure_value("mavproxy_args", " ".join(mavproxy_args))
return opts, args
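# Example (editor's note): in compatibility mode the first unknown option stops
# normal parsing and everything from there on is handed to MAVProxy, so
#   sim_vehicle.py -C -v ArduPlane --fictional-flag foo
# (with a deliberately made-up flag) ends up with
# opts.mavproxy_args == "--fictional-flag foo".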
def cygwin_pidof(proc_name):
""" Thanks to kata198 for this:
https://github.com/kata198/cygwin-ps-misc/blob/master/pidof
"""
pipe = subprocess.Popen("ps -ea | grep " + proc_name, shell=True, stdout=subprocess.PIPE)
output_lines = pipe.stdout.read().replace("\r", "").split("\n")
ret = pipe.wait()
pids = []
if ret != 0:
# No results
return []
for line in output_lines:
if not line:
continue
line_split = [item for item in line.split(' ') if item]
cmd = line_split[-1].split('/')[-1]
if cmd == proc_name:
try:
pid = int(line_split[0].strip())
            except ValueError:
pid = int(line_split[1].strip())
if pid not in pids:
pids.append(pid)
return pids
def under_cygwin():
"""Return if Cygwin binary exist"""
return os.path.exists("/usr/bin/cygstart")
def under_macos():
return sys.platform == 'darwin'
def kill_tasks_cygwin(victims):
"""Shell out to ps -ea to find processes to kill"""
for victim in list(victims):
pids = cygwin_pidof(victim)
# progress("pids for (%s): %s" % (victim,",".join([ str(p) for p in pids])))
for apid in pids:
os.kill(apid, signal.SIGKILL)
def kill_tasks_macos():
for window in windowID:
cmd = "osascript -e \'tell application \"Terminal\" to close (window(get index of window id %s))\'" % window
os.system(cmd)
def kill_tasks_psutil(victims):
"""Use the psutil module to kill tasks by name. Sadly, this module is not available on Windows, but when it is we should be able to *just* use this routine"""
import psutil
for proc in psutil.process_iter():
if proc.status == psutil.STATUS_ZOMBIE:
continue
if proc.name in victims:
proc.kill()
def kill_tasks_pkill(victims):
"""Shell out to pkill(1) to kill processed by name"""
for victim in victims: # pkill takes a single pattern, so iterate
cmd = ["pkill", victim]
run_cmd_blocking("pkill", cmd, quiet=True)
class BobException(Exception):
"""Handle Bob's Exceptions"""
pass
def kill_tasks():
"""Clean up stray processes by name. This is a somewhat shotgun approach"""
progress("Killing tasks")
try:
victim_names = {
'JSBSim',
'lt-JSBSim',
'ArduPlane.elf',
'ArduCopter.elf',
'APMrover2.elf',
'AntennaTracker.elf',
'JSBSIm.exe',
'MAVProxy.exe',
'runsim.py',
}
for frame in _options_for_frame.keys():
if "waf_target" not in _options_for_frame[frame]:
continue
exe_name = os.path.basename(_options_for_frame[frame]["waf_target"])
victim_names.add(exe_name)
if under_cygwin():
return kill_tasks_cygwin(victim_names)
if under_macos():
return kill_tasks_macos()
try:
kill_tasks_psutil(victim_names)
except ImportError:
kill_tasks_pkill(victim_names)
except Exception as e:
progress("kill_tasks failed: {}".format(str(e)))
def check_jsbsim_version():
"""Assert that the JSBSim we will run is the one we expect to run"""
jsbsim_cmd = ["JSBSim", "--version"]
progress_cmd("Get JSBSim version", jsbsim_cmd)
try:
jsbsim_version = subprocess.Popen(jsbsim_cmd, stdout=subprocess.PIPE).communicate()[0]
except OSError:
jsbsim_version = '' # this value will trigger the ".index"
# check below and produce a reasonable
# error message
try:
jsbsim_version.index("ArduPilot")
except ValueError:
print(r"""
=========================================================
You need the latest ArduPilot version of JSBSim installed
and in your \$PATH
Please get it from git://github.com/tridge/jsbsim.git
See
http://ardupilot.org/dev/docs/setting-up-sitl-on-linux.html
for more details
=========================================================
""")
sys.exit(1)
def progress(text):
"""Display sim_vehicle progress text"""
print("SIM_VEHICLE: " + text)
def find_autotest_dir():
"""Return path to autotest directory"""
return os.path.dirname(os.path.realpath(__file__))
def find_root_dir():
"""Return path to root directory"""
return os.path.realpath(os.path.join(find_autotest_dir(), '../..'))
"""
make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py
default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
extra_mavlink_cmds: extra parameters that will be passed to mavproxy
"""
_options_for_frame = {
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
# COPTER
"+": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"quad": {
"model": "+",
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"X": {
"waf_target": "bin/arducopter-quad",
# this param set FRAME doesn't actually work because mavproxy
# won't set a parameter unless it knows of it, and the param fetch happens asynchronously
"default_params_filename": "default_params/copter.parm",
"extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
},
"hexa": {
"make_target": "sitl-hexa",
"waf_target": "bin/arducopter-hexa",
"default_params_filename": "default_params/copter.parm",
},
"octa-quad": {
"make_target": "sitl-octa-quad",
"waf_target": "bin/arducopter-octa-quad",
"default_params_filename": "default_params/copter.parm",
},
"octa": {
"make_target": "sitl-octa",
"waf_target": "bin/arducopter-octa",
"default_params_filename": "default_params/copter.parm",
},
"tri": {
"make_target": "sitl-tri",
"waf_target": "bin/arducopter-tri",
"default_params_filename": "default_params/copter-tri.parm",
},
"y6": {
"make_target": "sitl-y6",
"waf_target": "bin/arducopter-y6",
"default_params_filename": "default_params/copter-y6.parm",
},
# COPTER TYPES
"IrisRos": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"firefly": {
"waf_target": "bin/arducopter-firefly",
"default_params_filename": "default_params/firefly.parm",
},
# HELICOPTER
"heli": {
"make_target": "sitl-heli",
"waf_target": "bin/arducopter-heli",
"default_params_filename": "default_params/copter-heli.parm",
},
"heli-dual": {
"make_target": "sitl-heli-dual",
"waf_target": "bin/arducopter-heli-dual",
"default_params_filename": "default_params/copter-heli.parm",
},
# OTHER COPTERS
"singlecopter": {
"make_target": "sitl-single",
"waf_target": "bin/arducopter-single",
"default_params_filename": "default_params/copter-single.parm",
},
"coaxcopter": {
"make_target": "sitl-coax",
"waf_target": "bin/arducopter-coax",
"default_params_filename": "default_params/copter-coax.parm",
},
# PLANE
"quadplane-tilttri": {
"make_target": "sitl-tri",
"waf_target": "bin/arduplane-tri",
"default_params_filename": "default_params/quadplane-tilttri.parm",
},
"quadplane-tri": {
"make_target": "sitl-tri",
"waf_target": "bin/arduplane-tri",
"default_params_filename": "default_params/quadplane-tri.parm",
},
"quadplane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane.parm",
},
"plane-elevon": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-elevons.parm",
},
"plane-vtail": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-vtail.parm",
},
"plane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
},
# ROVER
"rover": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover.parm",
},
"rover-skid": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover-skid.parm",
},
# SIM
"gazebo-iris": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/gazebo-iris.parm",
},
"gazebo-zephyr": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/gazebo-zephyr.parm",
},
"last_letter": {
"waf_target": "bin/arduplane",
},
"CRRCSim": {
"waf_target": "bin/arduplane",
},
"jsbsim": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-jsbsim.parm",
},
}
_default_waf_target = {
"ArduPlane": "bin/arduplane",
"ArduCopter": "bin/arducopter-quad",
"APMrover2": "bin/ardurover",
"AntennaTracker": "bin/antennatracker",
}
def default_waf_target(vehicle):
"""Returns a waf target based on vehicle type, which is often determined by which directory the user is in"""
return _default_waf_target[vehicle]
def options_for_frame(frame, vehicle, opts):
"""Return informatiom about how to sitl for frame e.g. build-type==sitl"""
ret = None
if frame in _options_for_frame:
ret = _options_for_frame[frame]
else:
for p in ["octa", "tri", "y6", "firefly", "heli", "gazebo", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane"]:
if frame.startswith(p):
ret = _options_for_frame[p]
break
if ret is None:
if frame.endswith("-heli"):
ret = _options_for_frame["heli"]
if ret is None:
ret = {}
if "model" not in ret:
ret["model"] = frame
if "sitl-port" not in ret:
ret["sitl-port"] = True
if opts.model is not None:
ret["model"] = opts.model
if (ret["model"].find("xplane") != -1 or ret["model"].find("flightaxis") != -1):
ret["sitl-port"] = False
if "make_target" not in ret:
ret["make_target"] = "sitl"
if "waf_target" not in ret:
ret["waf_target"] = default_waf_target(vehicle)
if opts.build_target is not None:
ret["make_target"] = opts.build_target
ret["waf_target"] = opts.build_target
return ret
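# Worked example (editor's note): options_for_frame("octa-quad", "ArduCopter", opts)
# matches the exact "octa-quad" entry above, then fills in model="octa-quad" and
# sitl-port=True; an unlisted frame such as "octa-foo" would fall back to the
# "octa" prefix entry instead.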
def do_build_waf(opts, frame_options):
"""Build sitl using waf"""
progress("WAF build")
old_dir = os.getcwd()
root_dir = find_root_dir()
os.chdir(root_dir)
waf_light = os.path.join(root_dir, "modules/waf/waf-light")
cmd_configure = [waf_light, "configure", "--board", "sitl"]
if opts.debug:
cmd_configure.append("--debug")
pieces = [ shlex.split(x) for x in opts.waf_configure_args ]
for piece in pieces:
cmd_configure.extend(piece)
run_cmd_blocking("Configure waf", cmd_configure, check=True)
if opts.clean:
run_cmd_blocking("Building clean", [waf_light, "clean"])
cmd_build = [waf_light, "build", "--target", frame_options["waf_target"]]
if opts.jobs is not None:
cmd_build += ['-j', str(opts.jobs)]
pieces = [ shlex.split(x) for x in opts.waf_build_args ]
for piece in pieces:
cmd_build.extend(piece)
_, sts = run_cmd_blocking("Building", cmd_build)
if sts != 0: # build failed
if opts.rebuild_on_failure:
progress("Build failed; cleaning and rebuilding")
run_cmd_blocking("Building clean", [waf_light, "clean"])
_, sts = run_cmd_blocking("Building", cmd_build)
if sts != 0:
progress("Build failed")
sys.exit(1)
else:
progress("Build failed")
sys.exit(1)
os.chdir(old_dir)
def do_build(vehicledir, opts, frame_options):
"""Build build target (e.g. sitl) in directory vehicledir"""
if opts.build_system == 'waf':
return do_build_waf(opts, frame_options)
old_dir = os.getcwd()
os.chdir(vehicledir)
if opts.clean:
run_cmd_blocking("Building clean", ["make", "clean"])
build_target = frame_options["make_target"]
if opts.debug:
build_target += "-debug"
build_cmd = ["make", build_target]
if opts.jobs is not None:
build_cmd += ['-j', str(opts.jobs)]
_, sts = run_cmd_blocking("Building %s" % build_target, build_cmd)
if sts != 0:
progress("Build failed; cleaning and rebuilding")
run_cmd_blocking("Cleaning", ["make", "clean"])
_, sts = run_cmd_blocking("Building %s" % build_target, build_cmd)
if sts != 0:
progress("Build failed")
sys.exit(1)
os.chdir(old_dir)
def find_location_by_name(autotest, locname):
"""Search locations.txt for locname, return GPS coords"""
locations_filepath = os.path.join(autotest, "locations.txt")
for line in open(locations_filepath, 'r'):
line = line.rstrip("\n")
(name, loc) = line.split("=")
if name == locname:
return loc
print("Failed to find location (%s)" % cmd_opts.location)
sys.exit(1)
def progress_cmd(what, cmd):
"""Print cmd in a way a user could cut-and-paste to get the same effect"""
progress(what)
shell_text = "%s" % (" ".join(['"%s"' % x for x in cmd]))
progress(shell_text)
def run_cmd_blocking(what, cmd, quiet=False, check=False, **kw):
if not quiet:
progress_cmd(what, cmd)
p = subprocess.Popen(cmd, **kw)
ret = os.waitpid(p.pid, 0)
_, sts = ret
if check and sts != 0:
progress("(%s) exited with code %d" % (what,sts,))
sys.exit(1)
return ret
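# Example (editor's note):
#   run_cmd_blocking("List directory", ["ls", "-l"], check=True)
# echoes the quoted command via progress_cmd, blocks until it finishes and,
# because check=True, exits if the command fails.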
def run_in_terminal_window(autotest, name, cmd):
"""Execute the run_in_terminal_window.sh command for cmd"""
global windowID
runme = [os.path.join(autotest, "run_in_terminal_window.sh"), name]
runme.extend(cmd)
progress_cmd("Run " + name, runme)
if under_macos():
# on MacOS record the window IDs so we can close them later
out = subprocess.Popen(runme, stdout=subprocess.PIPE).communicate()[0]
import re
p = re.compile('tab 1 of window id (.*)')
windowID.append(p.findall(out)[0])
else:
p = subprocess.Popen(runme)
tracker_uarta = None # blemish
def start_antenna_tracker(autotest, opts):
"""Compile and run the AntennaTracker, add tracker to mavproxy"""
global tracker_uarta
progress("Preparing antenna tracker")
tracker_home = find_location_by_name(find_autotest_dir(), opts.tracker_location)
vehicledir = os.path.join(autotest, "../../" + "AntennaTracker")
tracker_frame_options = {
"waf_target": _default_waf_target["AntennaTracker"],
}
do_build(vehicledir, opts, tracker_frame_options)
tracker_instance = 1
os.chdir(vehicledir)
tracker_uarta = "tcp:127.0.0.1:" + str(5760 + 10 * tracker_instance)
exe = os.path.join(vehicledir, "AntennaTracker.elf")
run_in_terminal_window(autotest, "AntennaTracker", ["nice", exe, "-I" + str(tracker_instance), "--model=tracker", "--home=" + tracker_home])
def start_vehicle(binary, autotest, opts, stuff, loc):
"""Run the ArduPilot binary"""
cmd_name = opts.vehicle
cmd = []
if opts.valgrind:
cmd_name += " (valgrind)"
cmd.append("valgrind")
if opts.gdb:
cmd_name += " (gdb)"
cmd.append("gdb")
gdb_commands_file = tempfile.NamedTemporaryFile(delete=False)
atexit.register(os.unlink, gdb_commands_file.name)
for breakpoint in opts.breakpoint:
gdb_commands_file.write("b %s\n" % (breakpoint,))
gdb_commands_file.write("r\n")
gdb_commands_file.close()
cmd.extend(["-x", gdb_commands_file.name])
cmd.append("--args")
if opts.strace:
cmd_name += " (strace)"
cmd.append("strace")
strace_options = ['-o', binary + '.strace', '-s', '8000', '-ttt']
cmd.extend(strace_options)
cmd.append(binary)
cmd.append("-S")
cmd.append("-I" + str(opts.instance))
cmd.extend(["--home", loc])
if opts.wipe_eeprom:
cmd.append("-w")
cmd.extend(["--model", stuff["model"]])
cmd.extend(["--speedup", str(opts.speedup)])
if opts.sitl_instance_args:
cmd.extend(opts.sitl_instance_args.split(" ")) # this could be a lot better..
if opts.mavlink_gimbal:
cmd.append("--gimbal")
if "default_params_filename" in stuff:
path = os.path.join(autotest, stuff["default_params_filename"])
progress("Using defaults from (%s)" % (path,))
cmd.extend(["--defaults", path])
run_in_terminal_window(autotest, cmd_name, cmd)
def start_mavproxy(opts, stuff):
"""Run mavproxy"""
# FIXME: would be nice to e.g. "mavproxy.mavproxy(....).run" rather than shelling out
extra_cmd = ""
cmd = []
if under_cygwin():
cmd.append("/usr/bin/cygstart")
cmd.append("-w")
cmd.append("/cygdrive/c/Program Files (x86)/MAVProxy/mavproxy.exe")
else:
cmd.append("mavproxy.py")
if opts.hil:
cmd.extend(["--load-module", "HIL"])
else:
cmd.extend(["--master", mavlink_port])
if stuff["sitl-port"]:
cmd.extend(["--sitl", simout_port])
# If running inside of a vagrant guest, then we probably want to forward our mavlink out to the containing host OS
ports = [p + 10 * cmd_opts.instance for p in [14550,14551]]
for port in ports:
if getpass.getuser() == "vagrant":
cmd.extend(["--out", "10.0.2.2:" + str(port)])
else:
cmd.extend(["--out", "127.0.0.1:" + str(port)])
if opts.tracker:
cmd.extend(["--load-module", "tracker"])
global tracker_uarta
# tracker_uarta is set when we start the tracker...
extra_cmd += "module load map; tracker set port %s; tracker start; tracker arm;" % (tracker_uarta,)
if opts.mavlink_gimbal:
cmd.extend(["--load-module", "gimbal"])
if "extra_mavlink_cmds" in stuff:
extra_cmd += " " + stuff["extra_mavlink_cmds"]
if opts.mavproxy_args:
cmd.extend(opts.mavproxy_args.split(" ")) # this could be a lot better..
# compatibility pass-through parameters (for those that don't want
# to use -C :-)
for out in opts.out:
cmd.extend(['--out', out])
if opts.map:
cmd.append('--map')
if opts.console:
cmd.append('--console')
if opts.aircraft is not None:
cmd.extend(['--aircraft', opts.aircraft])
if len(extra_cmd):
cmd.extend(['--cmd', extra_cmd])
local_mp_modules_dir = os.path.abspath(
os.path.join(__file__, '..', '..', 'mavproxy_modules'))
env = dict(os.environ)
env['PYTHONPATH'] = local_mp_modules_dir + os.pathsep + env.get('PYTHONPATH', '')
run_cmd_blocking("Run MavProxy", cmd, env=env)
progress("MAVProxy exitted")
# define and run parser
parser = CompatOptionParser("sim_vehicle.py",
epilog="eeprom.bin in the starting directory contains the parameters for your " \
"simulated vehicle. Always start from the same directory. It is "\
"recommended that you start in the main vehicle directory for the vehicle" \
"you are simulating, for example, start in the ArduPlane directory to " \
"simulate ArduPlane")
parser.add_option("-v", "--vehicle", type='string', default=None, help="vehicle type (ArduPlane, ArduCopter or APMrover2)")
parser.add_option("-f", "--frame", type='string', default=None, help="""set aircraft frame type
for copters can choose +, X, quad or octa
for planes can choose elevon or vtail""")
parser.add_option("-C", "--sim_vehicle_sh_compatible", action='store_true', default=False, help="be compatible with the way sim_vehicle.sh works; make this the first option")
parser.add_option("-H", "--hil", action='store_true', default=False, help="start HIL")
group_build = optparse.OptionGroup(parser, "Build options")
group_build.add_option("-N", "--no-rebuild", action='store_true', default=False, help="don't rebuild before starting ardupilot")
group_build.add_option("-D", "--debug", action='store_true', default=False, help="build with debugging")
group_build.add_option("-c", "--clean", action='store_true', default=False, help="do a make clean before building")
group_build.add_option("-j", "--jobs", default=None, type='int', help="number of processors to use during build (default for waf : number of processor, for make : 1)")
group_build.add_option("-b", "--build-target", default=None, type='string', help="override SITL build target")
group_build.add_option("-s", "--build-system", default="waf", type='choice', choices=["make", "waf"], help="build system to use")
group_build.add_option("", "--rebuild-on-failure", dest="rebuild_on_failure", action='store_true', default=False, help="if build fails, do not clean and rebuild")
group_build.add_option("", "--waf-configure-arg", action="append", dest="waf_configure_args", type="string", default=[], help="extra arguments to pass to waf in its configure step")
group_build.add_option("", "--waf-build-arg", action="append", dest="waf_build_args", type="string", default=[], help="extra arguments to pass to waf in its build step")
parser.add_option_group(group_build)
group_sim = optparse.OptionGroup(parser, "Simulation options")
group_sim.add_option("-I", "--instance", default=0, type='int', help="instance of simulator")
group_sim.add_option("-V", "--valgrind", action='store_true', default=False, help="enable valgrind for memory access checking (very slow!)")
group_sim.add_option("-T", "--tracker", action='store_true', default=False, help="start an antenna tracker instance")
group_sim.add_option("-A", "--sitl-instance-args", type='string', default=None, help="pass arguments to SITL instance")
# group_sim.add_option("-R", "--reverse-throttle", action='store_true', default=False, help="reverse throttle in plane")
group_sim.add_option("-G", "--gdb", action='store_true', default=False, help="use gdb for debugging ardupilot")
group_sim.add_option("-g", "--gdb-stopped", action='store_true', default=False, help="use gdb for debugging ardupilot (no auto-start)")
group_sim.add_option("-d", "--delay-start", default=0, type='float', help="delays the start of mavproxy by the number of seconds")
group_sim.add_option("-B", "--breakpoint", type='string', action="append", default=[], help="add a breakpoint at given location in debugger")
group_sim.add_option("-M", "--mavlink-gimbal", action='store_true', default=False, help="enable MAVLink gimbal")
group_sim.add_option("-L", "--location", type='string', default='CMAC', help="select start location from Tools/autotest/locations.txt")
group_sim.add_option("-l", "--custom-location", type='string', default=None, help="set custom start location")
group_sim.add_option("-S", "--speedup", default=1, type='int', help="set simulation speedup (1 for wall clock time)")
group_sim.add_option("-t", "--tracker-location", default='CMAC_PILOTSBOX', type='string', help="set antenna tracker start location")
group_sim.add_option("-w", "--wipe-eeprom", action='store_true', default=False, help="wipe EEPROM and reload parameters")
group_sim.add_option("-m", "--mavproxy-args", default=None, type='string', help="additional arguments to pass to mavproxy.py")
group_sim.add_option("", "--strace", action='store_true', default=False, help="strace the ArduPilot binary")
group_sim.add_option("", "--model", type='string', default=None, help="Override simulation model to use")
parser.add_option_group(group_sim)
# special-cased parameters for mavproxy, because some people's fingers
# have long memories, and they don't want to use -C :-)
group = optparse.OptionGroup(parser, "Compatibility MAVProxy options (consider using --mavproxy-args instead)")
group.add_option("", "--out", default=[], type='string', action="append", help="create an additional mavlink output")
group.add_option("", "--map", default=False, action='store_true', help="load map module on startup")
group.add_option("", "--console", default=False, action='store_true', help="load console module on startup")
group.add_option("", "--aircraft", default=None, help="store state and logs in named directory")
parser.add_option_group(group)
cmd_opts, cmd_args = parser.parse_args()
# clean up processes at exit:
atexit.register(kill_tasks)
progress("Start")
if cmd_opts.sim_vehicle_sh_compatible and cmd_opts.jobs is None:
cmd_opts.jobs = 1
# validate parameters
if cmd_opts.hil:
if cmd_opts.valgrind:
print("May not use valgrind with hil")
sys.exit(1)
if cmd_opts.gdb or cmd_opts.gdb_stopped:
print("May not use gdb with hil")
sys.exit(1)
if cmd_opts.strace:
print("May not use strace with hil")
sys.exit(1)
if cmd_opts.valgrind and (cmd_opts.gdb or cmd_opts.gdb_stopped):
print("May not use valgrind with gdb")
sys.exit(1)
if cmd_opts.strace and (cmd_opts.gdb or cmd_opts.gdb_stopped):
print("May not use strace with gdb")
sys.exit(1)
if cmd_opts.strace and cmd_opts.valgrind:
print("valgrind and strace almost certainly not a good idea")
# magically determine vehicle type (if required):
if cmd_opts.vehicle is None:
cwd = os.getcwd()
cmd_opts.vehicle = os.path.basename(cwd)
# determine a frame type if not specified:
default_frame_for_vehicle = {
"APMrover2": "rover",
"ArduPlane": "jsbsim",
"ArduCopter": "quad",
"AntennaTracker": "tracker",
}
if cmd_opts.vehicle not in default_frame_for_vehicle:
# try in parent directories, useful for having config in subdirectories
cwd = os.getcwd()
while cwd:
bname = os.path.basename(cwd)
if not bname:
break
if bname in default_frame_for_vehicle:
cmd_opts.vehicle = bname
break
cwd = os.path.dirname(cwd)
# try to validate vehicle
if cmd_opts.vehicle not in default_frame_for_vehicle:
progress("** Is (%s) really your vehicle type? Try -v VEHICLETYPE if not, or be in the e.g. ArduCopter subdirectory" % (cmd_opts.vehicle,))
# determine frame options (e.g. build type might be "sitl")
if cmd_opts.frame is None:
cmd_opts.frame = default_frame_for_vehicle[cmd_opts.vehicle]
# setup ports for this instance
mavlink_port = "tcp:127.0.0.1:" + str(5760 + 10 * cmd_opts.instance)
simout_port = "127.0.0.1:" + str(5501 + 10 * cmd_opts.instance)
frame_infos = options_for_frame(cmd_opts.frame, cmd_opts.vehicle, cmd_opts)
if frame_infos["model"] == "jsbsim":
check_jsbsim_version()
vehicle_dir = os.path.realpath(os.path.join(find_root_dir(), cmd_opts.vehicle))
if not os.path.exists(vehicle_dir):
print("vehicle directory (%s) does not exist" % (vehicle_dir,))
sys.exit(1)
if not cmd_opts.hil:
if cmd_opts.instance == 0:
kill_tasks()
if cmd_opts.tracker:
start_antenna_tracker(find_autotest_dir(), cmd_opts)
if cmd_opts.custom_location:
location = cmd_opts.custom_location
progress("Starting up at %s" % (location,))
else:
location = find_location_by_name(find_autotest_dir(), cmd_opts.location)
progress("Starting up at %s (%s)" % (location, cmd_opts.location))
if cmd_opts.hil:
# (unlikely)
run_in_terminal_window(find_autotest_dir(), "JSBSim", [os.path.join(find_autotest_dir(), "jsb_sim/runsim.py"), "--home", location, "--speedup=" + str(cmd_opts.speedup)])
else:
if not cmd_opts.no_rebuild: # i.e. we should rebuild
do_build(vehicle_dir, cmd_opts, frame_infos)
if cmd_opts.build_system == "waf":
if cmd_opts.debug:
binary_basedir = "build/sitl-debug"
else:
binary_basedir = "build/sitl"
vehicle_binary = os.path.join(find_root_dir(), binary_basedir, frame_infos["waf_target"])
else:
vehicle_binary = os.path.join(vehicle_dir, cmd_opts.vehicle + ".elf")
if not os.path.exists(vehicle_binary):
print("Vehicle binary (%s) does not exist" % (vehicle_binary,))
sys.exit(1)
start_vehicle(vehicle_binary, find_autotest_dir(), cmd_opts, frame_infos, location)
if cmd_opts.delay_start:
progress("Sleeping for %f seconds" % (cmd_opts.delay_start,))
time.sleep(float(cmd_opts.delay_start))
start_mavproxy(cmd_opts, frame_infos)
sys.exit(0)
|
Block137/Dual2
|
Tools/autotest/sim_vehicle.py
|
Python
|
gpl-3.0
| 31,138
|
[
"Firefly"
] |
0e2fdc7e32f4d561f20edd7b6ce031bb1261fd3ebc3e626a57d6abfae083f7d6
|
"""
Extractor for gridded data
y.wang@bom.gov.au
"""
from datetime import date, timedelta
import numpy as np
from scipy.io import netcdf
from .cod import CoD
from .mask import Mask
from .gridded import AwapDailyData
class GriddedExtractor(object):
def __init__(self, cod_base_dir=None, mask_base_dir=None, gridded_base_dir=None, verbose=False):
self.cod_manager = CoD(base_dir=cod_base_dir, verbose=verbose)
self.mask_manager = Mask(base_dir=mask_base_dir, verbose=verbose)
self.awap_manager = AwapDailyData(base_dir=gridded_base_dir, verbose=verbose)
self.verbose = verbose
def extract(self, model, scenario, region_type, season, predictand, region=None, cube=True):
cod_dates = self.cod_manager.read_cod(model, scenario, region_type, season, predictand)
mask = self.mask_manager.read_mask(region or region_type)
data = self.awap_manager.read_data(predictand, cod_dates['adates'], mask)
if cube:
data, lat, lon = self.cubify(data, mask)
else:
lat, lon = self.awap_manager.lat, self.awap_manager.lon
return data, cod_dates['rdates'], lat, lon
@staticmethod
def cubify(data, mask):
""" Reshape the given data of shape (ndays, npoints) to (ndays, nlat, nlon)
"""
lat = np.arange(-4450, -995, 5) / 100.0
lon = np.arange(11200, 15630, 5) / 100.0
idx_mask = np.where(mask != 0)
idx_lat_min = np.min(idx_mask[0])
idx_lat_max = np.max(idx_mask[0]) + 1
idx_lon_min = np.min(idx_mask[1])
idx_lon_max = np.max(idx_mask[1]) + 1
lat_subsetted = lat[idx_lat_min: idx_lat_max]
lon_subsetted = lon[idx_lon_min: idx_lon_max]
mask_subsetted = mask[idx_lat_min: idx_lat_max, idx_lon_min: idx_lon_max]
idx_mask_subsetted = np.where(mask_subsetted.reshape(mask_subsetted.size) != 0)[0]
ret = np.empty((data.shape[0], mask_subsetted.size))
ret[:] = np.NaN
ret[:, idx_mask_subsetted] = data
ret = ret.reshape((data.shape[0], mask_subsetted.shape[0], mask_subsetted.shape[1]))
return ret, lat_subsetted, lon_subsetted
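    # Minimal sketch of cubify() on a toy mask (the names below are illustrative
    # only; a real mask has the full AWAP grid shape):
    #   mask = np.array([[1, 0],
    #                    [1, 1]])
    #   data = np.arange(6).reshape(2, 3)      # (ndays=2, npoints=3)
    #   cube, lat, lon = GriddedExtractor.cubify(data, mask)
    #   # cube.shape == (2, 2, 2); cube[0] == [[0., nan], [1., 2.]]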
@staticmethod
def save_netcdf(filename, data, dates, lat, lon,
model, scenario, region_type, season, predictand):
dates = (np.array([np.datetime64(d) for d in CoD.format_dates(dates)])
- np.datetime64('1899-12-31')).astype('int')
f = netcdf.netcdf_file(filename, 'w')
f.title = 'Daily gridded climate series (%s, %s, %s, %s, %s)' % (
model, scenario, region_type, season, predictand)
f.institution = 'Bureau of Meteorology'
f.source = 'Statistical Downscaling Model'
f.history = 'Generated on %s' % date.today()
f.createDimension('time', 0)
var_time = f.createVariable('time', np.float32, ('time',))
var_time[:] = dates
var_time.units = 'days since 1899-12-31 00:00:00'
var_time.calendar = 'standard'
f.createDimension('lat', lat.size)
var_lat = f.createVariable('lat', float, ('lat',))
var_lat[:] = lat
var_lat.units = 'degrees_north'
var_lat.long_name = 'latitude'
var_lat.standard_name = 'latitude'
f.createDimension('lon', lon.size)
var_lon = f.createVariable('lon', float, ('lon',))
var_lon[:] = lon
var_lon.units = 'degrees_east'
var_lon.long_name = 'longitude'
var_lon.standard_name = 'longitude'
missing_value = 99999.9
var_data = f.createVariable(predictand, np.float32, ('time', 'lat', 'lon'))
data = data.copy()
data[np.where(np.isnan(data))] = missing_value
var_data[:, :, :] = data
var_data.units = 'mm' if predictand == 'rain' else 'K'
var_data.long_name = predictand
var_data.missing_value = var_data._FillValue = missing_value
f.close()
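# Usage sketch (argument values are placeholders, not a real configuration):
#   ex = GriddedExtractor(verbose=True)
#   data, rdates, lat, lon = ex.extract('model', 'scenario', 'region', 'djf', 'rain')
#   GriddedExtractor.save_netcdf('out.nc', data, rdates, lat, lon,
#                                'model', 'scenario', 'region', 'djf', 'rain')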
|
paolap/cwsl-ctools
|
sdm/sdm/extractor.py
|
Python
|
apache-2.0
| 3,948
|
[
"NetCDF"
] |
98110f59572080ba38a46b16f95d792891cc8e83db240e2517578825f59c0687
|
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Samragni Banerjee <samragnibanerjee4@gmail.com>
# Alexander Sokolov <alexander.y.sokolov@gmail.com>
#
import unittest
import numpy
import math
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import adc
from pyscf import mp
r_CO = 1.21
r_CH = 1.12
theta = 116.5
x = r_CH * math.cos(theta * math.pi/(2 * 180.0))
y = r_CH * math.sin(theta * math.pi/(2 * 180.0))
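# The two H atoms are placed symmetrically about the C=O axis: theta is the
# full H-C-H angle, so each C-H bond makes theta/2 with the -y direction,
# giving an H-C-H angle of 116.5 degrees and C-H bond lengths of r_CH.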
class KnownValues(unittest.TestCase):
def test_check_mp2(self):
mol = gto.Mole()
mol.atom = [
['C', ( 0.0, 0.0, 0.0)],
['O', ( 0.0, r_CO , 0.0)],
['H', ( 0.0, -x, y)],
['H', ( 0.0, -x , -y)],]
mol.basis = '3-21g'
mol.verbose = 7
mol.output = '/dev/null'
mol.build()
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
mf.kernel()
# Ensure phase
mf.mo_coeff[:,mf.mo_coeff.sum(axis=0) < 0] *= -1
mp2 = mp.MP2(mf)
e_mp2 = mp2.kernel()[0]
myadc = adc.ADC(mf)
myadc.max_memory = 1
e_adc_mp2, t_amp1, t_amp2 = myadc.kernel_gs()
diff_mp2 = e_adc_mp2 - e_mp2
self.assertAlmostEqual(diff_mp2, 0.0000000000000, 6)
t_amp1_n = numpy.linalg.norm(t_amp1[0])
t_amp2_n = numpy.linalg.norm(t_amp2[0])
self.assertAlmostEqual(t_amp1_n, 0.0430170343900, 6)
self.assertAlmostEqual(t_amp2_n, 0.2427924473992, 6)
self.assertAlmostEqual(lib.fp(t_amp1[0]), 0.0196348032865, 6)
self.assertAlmostEqual(lib.fp(t_amp2[0]), 0.1119036046756, 6)
def test_check_mp2_high_cost(self):
mol = gto.Mole()
mol.atom = [
['C', ( 0.0, 0.0, 0.0)],
['O', ( 0.0, r_CO , 0.0)],
['H', ( 0.0, -x, y)],
['H', ( 0.0, -x , -y)],]
mol.basis = {'H': 'aug-cc-pVQZ',
'C': 'aug-cc-pVQZ',
'O': 'aug-cc-pVQZ',}
mol.verbose = 7
mol.output = '/dev/null'
mol.build()
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
mf.kernel()
# Ensure phase
mf.mo_coeff[:,mf.mo_coeff.sum(axis=0) < 0] *= -1
mp2 = mp.MP2(mf)
e_mp2 = mp2.kernel()[0]
myadc = adc.ADC(mf)
myadc.max_memory = 20
e_adc_mp2, t_amp1, t_amp2 = myadc.kernel_gs()
diff_mp2 = e_adc_mp2 - e_mp2
self.assertAlmostEqual(diff_mp2, 0.0000000000000, 6)
t_amp1_n = numpy.linalg.norm(t_amp1[0])
t_amp2_n = numpy.linalg.norm(t_amp2[0])
self.assertAlmostEqual(t_amp1_n, 0.0456504320024, 6)
self.assertAlmostEqual(t_amp2_n, 0.2977897530749, 6)
self.assertAlmostEqual(lib.fp(t_amp1[0]), -0.008983054536, 6)
self.assertAlmostEqual(lib.fp(t_amp2[0]), -0.639727653133, 6)
if __name__ == "__main__":
print("Ground state calculations for small memory RADC methods for H2CO molecule")
unittest.main()
|
sunqm/pyscf
|
pyscf/adc/test/test_radc/test_H2CO_radc_outcore_small_mem.py
|
Python
|
apache-2.0
| 3,532
|
[
"PySCF"
] |
af09b821d616a3313ba30290e102ae70dec0058193544a5905b6b5697f9474bc
|
"""
JSON Renderer
"""
from . import json_template
from .. import renderer_result
from ...visitors import data_visitor as dv
class JsonPrimitiveRenderer(dv.DataVisitor):
def __init__(self, string_stream, new_line, render_name=True):
self.string_stream = string_stream
self.new_line = new_line
self.render_name = render_name
def visit(self, data):
super(JsonPrimitiveRenderer, self).visit(data)
def enter_primitive(self, data):
self.string_stream.append(json_template.level(data['level']))
if self.render_name:
self.string_stream.append(json_template.render_name(data['name']))
if (data['type'] == 'long' or
data['type'] == 'long int' or
data['type'] == 'int' or
data['type'] == 'short' or
data['type'] == 'short int' or
data['type'] == 'char'):
self.string_stream.append(json_template.int_default_value())
elif data['type'] == 'double' or data['type'] == 'float':
self.string_stream.append(json_template.float_default_value())
elif data['type'] == 'bool':
self.string_stream.append(json_template.boolean_default_value())
self.string_stream.append(json_template.sep())
self.string_stream.append(self.new_line)
class JsonStringRenderer(dv.DataVisitor):
def __init__(self, string_stream, new_line, render_name=True):
self.string_stream = string_stream
self.new_line = new_line
self.render_name = render_name
def visit(self, data):
super(JsonStringRenderer, self).visit(data)
def enter_string(self, data):
self.string_stream.append(json_template.level(data['level']))
if self.render_name:
self.string_stream.append(json_template.render_name(data['name']))
self.string_stream.append(json_template.string_default_value())
self.string_stream.append(json_template.sep())
self.string_stream.append(self.new_line)
class JsonObjectRenderer(dv.DataVisitor):
def __init__(self, string_stream, new_line):
self.string_stream = string_stream
self.new_line = new_line
def visit(self, data):
super(JsonObjectRenderer, self).visit(data)
def enter_userdefined(self, data):
if (not data['is_vector_elt'] and
dv.is_userdefined_root(data)):
return
self.string_stream.append(json_template.level(data['level']))
if not data['is_vector_elt']:
self.string_stream.append(json_template.render_name(
data['name']))
self.string_stream.append(json_template.object_begin())
self.string_stream.append(self.new_line)
def exit_userdefined(self, data):
if (not data['is_vector_elt'] and
dv.is_userdefined_root(data)):
return
self.string_stream.pop_back(2)
self.string_stream.append(self.new_line)
self.string_stream.append(json_template.level(data['level']))
self.string_stream.append(json_template.object_end())
self.string_stream.append(json_template.sep())
self.string_stream.append(self.new_line)
def enter_primitive(self, data):
render_name = not data['is_vector_elt']
r = JsonPrimitiveRenderer(self.string_stream,
self.new_line,
render_name)
return r
def enter_string(self, data):
render_name = not data['is_vector_elt']
r = JsonStringRenderer(self.string_stream,
self.new_line,
render_name)
return r
def enter_vector(self, data):
self.string_stream.append(json_template.level(data['level']))
self.string_stream.append(json_template.render_name(data['name']))
self.string_stream.append(json_template.array_begin())
self.string_stream.append(self.new_line)
def exit_vector(self, data):
self.string_stream.pop_back(2)
self.string_stream.append(self.new_line)
self.string_stream.append(json_template.level(data['level']))
self.string_stream.append(json_template.array_end())
self.string_stream.append(json_template.sep())
self.string_stream.append(self.new_line)
class JsonRootRenderer(dv.DataVisitor):
def __init__(self, string_stream, new_line):
self.string_stream = string_stream
self.new_line = new_line
def visit(self, data):
super(JsonRootRenderer, self).visit(data)
def enter_root(self, data):
r = JsonObjectRenderer(self.string_stream, self.new_line)
return r
class JsonRenderer(dv.DataVisitor):
"""
JSON array -> std::vector
JSON string -> std::string
JSON number -> double, float, long, int, short, char
JSON true -> bool
JSON false -> bool
JSON null -> not supported
"""
def __init__(self, new_line=''):
self.new_line = new_line
self.string_stream = renderer_result.RendererResult()
def visit(self, data):
super(JsonRenderer, self).visit(data)
def enter_root(self, data):
self.string_stream.append(json_template.object_begin())
self.string_stream.append(self.new_line)
r = JsonRootRenderer(self.string_stream, self.new_line)
return r
def exit_root(self, data):
self.string_stream.pop_back(2)
self.string_stream.append(self.new_line)
self.string_stream.append(json_template.level(data['level']))
self.string_stream.append(json_template.object_end())
def get_string(self):
return self.string_stream.get_string()
def dump(self, filename):
with open(filename, 'w') as f:
f.write(self.get_string())
f.flush()
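# Usage sketch (assumes a data tree built by the package's visitors; the
# variable names here are illustrative only):
#   renderer = JsonRenderer(new_line='\n')
#   renderer.visit(root_data)        # root_data: the parsed data description
#   renderer.dump('out.json')        # or renderer.get_string()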
|
svperbeast/plain_data_companion
|
src/templates/json/json_renderer.py
|
Python
|
mit
| 5,861
|
[
"VisIt"
] |
bb22b20e930148dce847ac34f9c2cd343c5fc97aa861e795f71f207f49faf927
|
###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
from slimit import ast
class ECMAVisitor(object):
def __init__(self):
self.indent_level = 0
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
method = 'visit_%s' % node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
return 'GEN: %r' % node
def visit_Program(self, node):
return '\n'.join(self.visit(child) for child in node)
def visit_Block(self, node):
s = '{\n'
self.indent_level += 2
s += '\n'.join(
self._make_indent() + self.visit(child) for child in node)
self.indent_level -= 2
s += '\n' + self._make_indent() + '}'
return s
def visit_VarStatement(self, node):
s = 'var %s;' % ', '.join(self.visit(child) for child in node)
return s
def visit_VarDecl(self, node):
output = []
output.append(self.visit(node.identifier))
if node.initializer is not None:
output.append(' = %s' % self.visit(node.initializer))
return ''.join(output)
def visit_Identifier(self, node):
return node.value
def visit_Assign(self, node):
if node.op == ':':
template = '%s%s %s'
else:
template = '%s %s %s'
if getattr(node, '_parens', False):
template = '(%s)' % template
return template % (
self.visit(node.left), node.op, self.visit(node.right))
def visit_GetPropAssign(self, node):
template = 'get %s() {\n%s\n%s}'
if getattr(node, '_parens', False):
template = '(%s)' % template
self.indent_level += 2
body = '\n'.join(
(self._make_indent() + self.visit(el))
for el in node.elements
)
self.indent_level -= 2
tail = self._make_indent()
return template % (self.visit(node.prop_name), body, tail)
def visit_SetPropAssign(self, node):
template = 'set %s(%s) {\n%s\n%s}'
if getattr(node, '_parens', False):
template = '(%s)' % template
if len(node.parameters) > 1:
raise SyntaxError(
'Setter functions must have one argument: %s' % node)
params = ','.join(self.visit(param) for param in node.parameters)
self.indent_level += 2
body = '\n'.join(
(self._make_indent() + self.visit(el))
for el in node.elements
)
self.indent_level -= 2
tail = self._make_indent()
return template % (self.visit(node.prop_name), params, body, tail)
def visit_Number(self, node):
return node.value
def visit_Comma(self, node):
s = '%s, %s' % (self.visit(node.left), self.visit(node.right))
if getattr(node, '_parens', False):
s = '(' + s + ')'
return s
def visit_EmptyStatement(self, node):
return node.value
def visit_If(self, node):
s = 'if ('
if node.predicate is not None:
s += self.visit(node.predicate)
s += ') '
s += self.visit(node.consequent)
if node.alternative is not None:
s += ' else '
s += self.visit(node.alternative)
return s
def visit_Boolean(self, node):
return node.value
def visit_For(self, node):
s = 'for ('
if node.init is not None:
s += self.visit(node.init)
if node.init is None:
s += ' ; '
elif isinstance(node.init, (ast.Assign, ast.Comma, ast.FunctionCall,
ast.UnaryOp, ast.Identifier, ast.BinOp,
ast.Conditional, ast.Regex, ast.NewExpr)):
s += '; '
else:
s += ' '
if node.cond is not None:
s += self.visit(node.cond)
s += '; '
if node.count is not None:
s += self.visit(node.count)
s += ') ' + self.visit(node.statement)
return s
def visit_ForIn(self, node):
if isinstance(node.item, ast.VarDecl):
template = 'for (var %s in %s) '
else:
template = 'for (%s in %s) '
s = template % (self.visit(node.item), self.visit(node.iterable))
s += self.visit(node.statement)
return s
def visit_BinOp(self, node):
if getattr(node, '_parens', False):
template = '(%s %s %s)'
else:
template = '%s %s %s'
return template % (
self.visit(node.left), node.op, self.visit(node.right))
def visit_UnaryOp(self, node):
s = self.visit(node.value)
if node.postfix:
s += node.op
elif node.op in ('delete', 'void', 'typeof'):
s = '%s %s' % (node.op, s)
else:
s = '%s%s' % (node.op, s)
if getattr(node, '_parens', False):
s = '(%s)' % s
return s
def visit_ExprStatement(self, node):
return '%s;' % self.visit(node.expr)
def visit_DoWhile(self, node):
s = 'do '
s += self.visit(node.statement)
s += ' while (%s);' % self.visit(node.predicate)
return s
def visit_While(self, node):
s = 'while (%s) ' % self.visit(node.predicate)
s += self.visit(node.statement)
return s
def visit_Null(self, node):
return 'null'
def visit_String(self, node):
return node.value
def visit_Continue(self, node):
if node.identifier is not None:
s = 'continue %s;' % self.visit_Identifier(node.identifier)
else:
s = 'continue;'
return s
def visit_Break(self, node):
if node.identifier is not None:
s = 'break %s;' % self.visit_Identifier(node.identifier)
else:
s = 'break;'
return s
def visit_Return(self, node):
if node.expr is None:
return 'return;'
else:
return 'return %s;' % self.visit(node.expr)
def visit_With(self, node):
s = 'with (%s) ' % self.visit(node.expr)
s += self.visit(node.statement)
return s
def visit_Label(self, node):
s = '%s: %s' % (
self.visit(node.identifier), self.visit(node.statement))
return s
def visit_Switch(self, node):
s = 'switch (%s) {\n' % self.visit(node.expr)
self.indent_level += 2
for case in node.cases:
s += self._make_indent() + self.visit_Case(case)
if node.default is not None:
s += self.visit_Default(node.default)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
def visit_Case(self, node):
s = 'case %s:\n' % self.visit(node.expr)
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
if elements:
s += elements + '\n'
self.indent_level -= 2
return s
def visit_Default(self, node):
s = self._make_indent() + 'default:\n'
self.indent_level += 2
s += '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
if node.elements is not None:
s += '\n'
self.indent_level -= 2
return s
def visit_Throw(self, node):
s = 'throw %s;' % self.visit(node.expr)
return s
def visit_Debugger(self, node):
return '%s;' % node.value
def visit_Try(self, node):
s = 'try '
s += self.visit(node.statements)
if node.catch is not None:
s += ' ' + self.visit(node.catch)
if node.fin is not None:
s += ' ' + self.visit(node.fin)
return s
def visit_Catch(self, node):
s = 'catch (%s) %s' % (
self.visit(node.identifier), self.visit(node.elements))
return s
def visit_Finally(self, node):
s = 'finally %s' % self.visit(node.elements)
return s
def visit_FuncDecl(self, node):
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
self.indent_level -= 2
s = 'function %s(%s) {\n%s' % (
self.visit(node.identifier),
', '.join(self.visit(param) for param in node.parameters),
elements,
)
s += '\n' + self._make_indent() + '}'
return s
def visit_FuncExpr(self, node):
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
self.indent_level -= 2
ident = node.identifier
ident = '' if ident is None else ' %s' % self.visit(ident)
header = 'function%s(%s)'
if getattr(node, '_parens', False):
header = '(' + header
s = (header + ' {\n%s') % (
ident,
', '.join(self.visit(param) for param in node.parameters),
elements,
)
s += '\n' + self._make_indent() + '}'
if getattr(node, '_parens', False):
s += ')'
return s
def visit_Conditional(self, node):
if getattr(node, '_parens', False):
template = '(%s ? %s : %s)'
else:
template = '%s ? %s : %s'
s = template % (
self.visit(node.predicate),
self.visit(node.consequent), self.visit(node.alternative))
return s
def visit_Regex(self, node):
if getattr(node, '_parens', False):
return '(%s)' % node.value
else:
return node.value
def visit_NewExpr(self, node):
s = 'new %s(%s)' % (
self.visit(node.identifier),
', '.join(self.visit(arg) for arg in node.args)
)
return s
def visit_DotAccessor(self, node):
if getattr(node, '_parens', False):
template = '(%s.%s)'
else:
template = '%s.%s'
left = self.visit(node.node)
if isinstance(node.node, ast.Number):
left = '(%s)' % left
s = template % (left, self.visit(node.identifier))
return s
def visit_BracketAccessor(self, node):
s = '%s[%s]' % (self.visit(node.node), self.visit(node.expr))
return s
def visit_FunctionCall(self, node):
s = '%s(%s)' % (self.visit(node.identifier),
', '.join(self.visit(arg) for arg in node.args))
if getattr(node, '_parens', False):
s = '(' + s + ')'
return s
def visit_Object(self, node):
s = '{\n'
self.indent_level += 2
s += ',\n'.join(self._make_indent() + self.visit(prop)
for prop in node.properties)
self.indent_level -= 2
if node.properties:
s += '\n'
s += self._make_indent() + '}'
return s
def visit_Array(self, node):
s = '['
length = len(node.items) - 1
for index, item in enumerate(node.items):
if isinstance(item, ast.Elision):
s += ','
elif index != length:
s += self.visit(item) + ','
else:
s += self.visit(item)
s += ']'
return s
def visit_This(self, node):
return 'this'
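# Usage sketch (slimit's parser lives outside this module; shown here only to
# illustrate how the visitor is typically driven):
#   from slimit.parser import Parser
#   tree = Parser().parse('var a = 1;')
#   print(ECMAVisitor().visit(tree))   # -> 'var a = 1;'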
|
moses-palmer/slimit
|
src/slimit/visitors/ecmavisitor.py
|
Python
|
mit
| 12,856
|
[
"VisIt"
] |
d5547a546d4bd5134691b51078994537fe3da311337036853b62af3b51bccae0
|
#!/usr/bin/env python
"""
binreads is a simple script that uses pysam to find and retrieve reads over a
region from a read-mapping analysis (BAM files).
The script requires a **sorted** BAM file, and you need to specify the name of
the reference sequence used (just check with samtools view). If you want to
specify a subregion, use the -r flag with the start-stop positions immediately
after.
The -u flag will attempt to retrieve reads that did not have a properly mapped
pair; these are ignored by default.
Logging/error messages are sent to STDERR, while the read sequences (in FASTQ)
are sent to STDOUT. Please pipe output accordingly.
### CHANGE LOG ###
2013-09-11 Nabil-Fareed Alikhan <n.alikhan@uq.edu.au>
* Initial build
"""
import sys, os, traceback, argparse
import time
import __init__ as meta
import pysam
epi = "Licence: "+meta.__licence__ + " by " +meta.__author__ + " <" +meta.__author_email__ + ">"
def main ():
global args
samfile = pysam.Samfile(args.bamfile, 'rb')
    samstream = ''
try:
if args.region == None:
samstream = samfile.fetch(args.chr)
else:
samstream = samfile.fetch(args.chr, args.region[0], args.region[1])
except ValueError:
sys.stderr.write('Error parsing BAM file, is it sorted?\n')
unmapped = {}
if not args.unmapped: sys.stderr.write('Unmapped mates\n')
for alignread in samstream:
if not alignread.mate_is_unmapped:
print '@%s/1' %alignread.qname
print '%s' %alignread.seq
print '+%s/1' %alignread.qname
print '%s' %alignread.qual
mate = samfile.mate(alignread)
print '@%s/2' %mate.qname
print '%s' %mate.seq
print '+%s/2' %mate.qname
print '%s' %mate.qual
else:
            if not args.unmapped: sys.stderr.write('%s\n' %alignread.qname)
else:
unmapped[alignread.qname] = alignread.seq
if args.unmapped:
sys.stderr.write('Fetching unmapped pairs\n')
for al in samfile.fetch(until_eof = True):
# This pair is in our region but one read is unmapped
            if al.qname in unmapped:
sys.stderr.write('Read pair - unmapped mate: %s \n' %al.qname)
# If the sequence is identical this is the read. else mate
if unmapped[al.qname] == al.seq:
sys.stdout.write('@%s/1\n' %al.qname)
sys.stdout.write('%s\n' %al.seq)
sys.stdout.write('+%s/1\n' %al.qname)
sys.stdout.write('%s\n' %al.qual)
else:
sys.stdout.write('@%s/2\n' %al.qname)
sys.stdout.write('%s\n' %al.seq)
sys.stdout.write('+%s/2\n' %al.qname)
sys.stdout.write('%s\n' %al.qual)
sys.stderr.write('Done.\n')
if __name__ == '__main__':
try:
start_time = time.time()
desc = __doc__.split('\n\n')[1].strip()
parser = argparse.ArgumentParser(description=desc,epilog=epi)
parser.add_argument ('-v', '--verbose', action='store_true', default=False, help='verbose output')
parser.add_argument('--version', action='version', version='%(prog)s ' + meta.__version__)
parser.add_argument('-o','--output',action='store',help='output prefix')
parser.add_argument ('bamfile', action='store', help='Bamfile to retrieve reads')
parser.add_argument ('chr', action='store', help='Chromosome/reference name')
parser.add_argument('-r','--region', action='store', type=int, nargs =2, help='subregion to grab reads')
parser.add_argument('-u','--unmapped', action='store_true', help='Retrieve pairs with unmapped mates')
args = parser.parse_args()
if args.verbose: sys.stderr.write( "Executing @ %s\n" %time.asctime())
main()
if args.verbose: sys.stderr.write("Ended @ %s\n" %time.asctime())
if args.verbose: sys.stderr.write('total time in minutes: %d\n'
%((time.time() - start_time) / 60.0))
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
except SystemExit, e: # sys.exit()
raise e
except Exception, e:
print 'ERROR, UNEXPECTED EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
|
happykhan/binreads
|
binreads/binreads.py
|
Python
|
gpl-3.0
| 4,388
|
[
"pysam"
] |
60e1efe3287b063d5b221489634c2b0047a1c851430ebfe2c9619d5df1ace6dc
|
# Copyright 2014, Brian Coca <bcoca@ansible.com>
# Copyright 2017, Ken Celenza <ken@networktocode.com>
# Copyright 2017, Jason Edelman <jason@networktocode.com>
# Copyright 2017, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import math
from jinja2.filters import pass_environment
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.common.text import formatters
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.common._collections_compat import Hashable, Mapping, Iterable
from ansible.module_utils._text import to_native, to_text
from ansible.utils.display import Display
try:
from jinja2.filters import do_unique
HAS_UNIQUE = True
except ImportError:
HAS_UNIQUE = False
display = Display()
@pass_environment
# Use case_sensitive=None as a sentinel value, so we raise an error only when
# it is explicitly set and cannot be handled (by Jinja2 w/o 'unique' or the fallback version)
def unique(environment, a, case_sensitive=None, attribute=None):
def _do_fail(e):
if case_sensitive is False or attribute:
raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
"as it does not support the parameters supplied", orig_exc=e)
error = e = None
try:
if HAS_UNIQUE:
c = list(do_unique(environment, a, case_sensitive=bool(case_sensitive), attribute=attribute))
except TypeError as e:
error = e
_do_fail(e)
except Exception as e:
error = e
_do_fail(e)
display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))
if not HAS_UNIQUE or error:
# handle Jinja2 specific attributes when using Ansible's version
if case_sensitive is False or attribute:
raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive=False nor attribute parameters, "
"you need a newer version of Jinja2 that provides their version of the filter.")
c = []
for x in a:
if x not in c:
c.append(x)
return c
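# Minimal sketch ('env' stands for the active Jinja2 environment):
#   unique(env, [2, 1, 2, 3])   # -> [2, 1, 3], first occurrences win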
@pass_environment
def intersect(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) & set(b)
else:
c = unique(environment, [x for x in a if x in b], True)
return c
@pass_environment
def difference(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) - set(b)
else:
c = unique(environment, [x for x in a if x not in b], True)
return c
@pass_environment
def symmetric_difference(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) ^ set(b)
else:
isect = intersect(environment, a, b)
c = [x for x in union(environment, a, b) if x not in isect]
return c
@pass_environment
def union(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) | set(b)
else:
c = unique(environment, a + b, True)
return c
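# Minimal sketch of the set-theory filters on plain lists ('env' as above;
# hashable inputs such as frozensets are intersected/unioned as sets instead):
#   intersect(env, [1, 2, 3], [2, 3, 4])   # -> [2, 3]
#   union(env, [1, 2], [2, 3])             # -> [1, 2, 3]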
def logarithm(x, base=math.e):
try:
if base == 10:
return math.log10(x)
else:
return math.log(x, base)
except TypeError as e:
raise AnsibleFilterTypeError('log() can only be used on numbers: %s' % to_native(e))
def power(x, y):
try:
return math.pow(x, y)
except TypeError as e:
raise AnsibleFilterTypeError('pow() can only be used on numbers: %s' % to_native(e))
def inversepower(x, base=2):
try:
if base == 2:
return math.sqrt(x)
else:
return math.pow(x, 1.0 / float(base))
except (ValueError, TypeError) as e:
raise AnsibleFilterTypeError('root() can only be used on numbers: %s' % to_native(e))
def human_readable(size, isbits=False, unit=None):
''' Return a human readable string '''
try:
return formatters.bytes_to_human(size, isbits, unit)
except TypeError as e:
raise AnsibleFilterTypeError("human_readable() failed on bad input: %s" % to_native(e))
except Exception:
raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size)
def human_to_bytes(size, default_unit=None, isbits=False):
''' Return bytes count from a human readable string '''
try:
return formatters.human_to_bytes(size, default_unit, isbits)
except TypeError as e:
raise AnsibleFilterTypeError("human_to_bytes() failed on bad input: %s" % to_native(e))
except Exception:
raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
def rekey_on_member(data, key, duplicates='error'):
"""
Rekey a dict of dicts on another member
May also create a dict from a list of dicts.
duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key
value would be duplicated or to overwrite previous entries if that's the case.
"""
if duplicates not in ('error', 'overwrite'):
raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))
new_obj = {}
# Ensure the positional args are defined - raise jinja2.exceptions.UndefinedError if not
bool(data) and bool(key)
if isinstance(data, Mapping):
iterate_over = data.values()
elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
iterate_over = data
else:
raise AnsibleFilterTypeError("Type is not a valid list, set, or dict")
for item in iterate_over:
if not isinstance(item, Mapping):
raise AnsibleFilterTypeError("List item is not a valid dict")
try:
key_elem = item[key]
except KeyError:
raise AnsibleFilterError("Key {0} was not found".format(key))
except TypeError as e:
raise AnsibleFilterTypeError(to_native(e))
except Exception as e:
raise AnsibleFilterError(to_native(e))
# Note: if new_obj[key_elem] exists it will always be a non-empty dict (it will at
# minimum contain {key: key_elem}
if new_obj.get(key_elem, None):
if duplicates == 'error':
raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
elif duplicates == 'overwrite':
new_obj[key_elem] = item
else:
new_obj[key_elem] = item
return new_obj
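# Minimal sketch:
#   rekey_on_member([{'name': 'a', 'x': 1}, {'name': 'b', 'x': 2}], 'name')
#   # -> {'a': {'name': 'a', 'x': 1}, 'b': {'name': 'b', 'x': 2}}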
class FilterModule(object):
''' Ansible math jinja2 filters '''
def filters(self):
filters = {
# exponents and logarithms
'log': logarithm,
'pow': power,
'root': inversepower,
# set theory
'unique': unique,
'intersect': intersect,
'difference': difference,
'symmetric_difference': symmetric_difference,
'union': union,
# combinatorial
'product': itertools.product,
'permutations': itertools.permutations,
'combinations': itertools.combinations,
# computer theory
'human_readable': human_readable,
'human_to_bytes': human_to_bytes,
'rekey_on_member': rekey_on_member,
# zip
'zip': zip,
'zip_longest': itertools.zip_longest,
}
return filters
|
privateip/ansible
|
lib/ansible/plugins/filter/mathstuff.py
|
Python
|
gpl-3.0
| 8,342
|
[
"Brian"
] |
9ef5cb1549dcf8d71a6431d5a33688694e1b71bb72214a43b2265f84f31127eb
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_virtualservice
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of VirtualService Avi RESTful Object
description:
- This module is used to configure VirtualService object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
active_standby_se_tag:
description:
- This configuration only applies if the virtualservice is in legacy active standby ha mode and load distribution among active standby is enabled.
- This field is used to tag the virtualservice so that virtualservices with the same tag will share the same active serviceengine.
- Virtualservices with different tags will have different active serviceengines.
            - If one of the serviceengines in the serviceenginegroup fails, all virtualservices will end up using the same active serviceengine.
- Redistribution of the virtualservices can be either manual or automated when the failed serviceengine recovers.
- Redistribution is based on the auto redistribute property of the serviceenginegroup.
- Enum options - ACTIVE_STANDBY_SE_1, ACTIVE_STANDBY_SE_2.
- Default value when not specified in API or module is interpreted by Avi Controller as ACTIVE_STANDBY_SE_1.
analytics_policy:
description:
- Determines analytics settings for the application.
analytics_profile_ref:
description:
- Specifies settings related to analytics.
- It is a reference to an object of type analyticsprofile.
application_profile_ref:
description:
- Enable application layer specific features for the virtual service.
- It is a reference to an object of type applicationprofile.
auto_allocate_floating_ip:
description:
- Auto-allocate floating/elastic ip from the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
auto_allocate_ip:
description:
- Auto-allocate vip from the provided subnet.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
availability_zone:
description:
- Availability-zone to place the virtual service.
- Field deprecated in 17.1.1.
avi_allocated_fip:
description:
- (internal-use) fip allocated by avi in the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
avi_allocated_vip:
description:
- (internal-use) vip allocated by avi in the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
client_auth:
description:
- Http authentication configuration for protected resources.
close_client_conn_on_config_update:
description:
- Close client connection on vs config update.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
cloud_config_cksum:
description:
- Checksum of cloud configuration for vs.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
cloud_type:
description:
- Enum options - cloud_none, cloud_vcenter, cloud_openstack, cloud_aws, cloud_vca, cloud_apic, cloud_mesos, cloud_linuxserver, cloud_docker_ucp,
- cloud_rancher, cloud_oshift_k8s, cloud_azure.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
connections_rate_limit:
description:
- Rate limit the incoming connections to this virtual service.
content_rewrite:
description:
- Profile used to match and rewrite strings in request and/or response body.
created_by:
description:
- Creator name.
delay_fairness:
description:
- Select the algorithm for qos fairness.
- This determines how multiple virtual services sharing the same service engines will prioritize traffic over a congested network.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
description:
description:
- User defined description for the object.
discovered_network_ref:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is deprecated.
- It is a reference to an object of type network.
- Field deprecated in 17.1.1.
discovered_networks:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is used internally by avi, not editable by the user.
- Field deprecated in 17.1.1.
discovered_subnet:
description:
- (internal-use) discovered subnets providing reachability for client facing virtual service ip.
- This field is deprecated.
- Field deprecated in 17.1.1.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
            - Note that only one of the fqdn and dns_info settings is allowed.
dns_policies:
description:
- Dns policies applied on the dns traffic of the virtual service.
- Field introduced in 17.1.1.
version_added: "2.4"
east_west_placement:
description:
- Force placement on all se's in service group (mesos mode only).
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
enable_autogw:
description:
- Response traffic to clients will be sent back to the source mac address of the connection, rather than statically sent to a default gateway.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
enable_rhi:
description:
- Enable route health injection using the bgp config in the vrf context.
type: bool
enable_rhi_snat:
description:
- Enable route health injection for source nat'ted floating ip address using the bgp config in the vrf context.
type: bool
enabled:
description:
- Enable or disable the virtual service.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
error_page_profile_ref:
description:
- Error page profile to be used for this virtualservice.this profile is used to send the custom error page to the client generated by the proxy.
- It is a reference to an object of type errorpageprofile.
- Field introduced in 17.2.4.
version_added: "2.5"
floating_ip:
description:
- Floating ip to associate with this virtual service.
- Field deprecated in 17.1.1.
floating_subnet_uuid:
description:
- If auto_allocate_floating_ip is true and more than one floating-ip subnets exist, then the subnet for the floating ip address allocation.
- This field is applicable only if the virtualservice belongs to an openstack or aws cloud.
- In openstack or aws cloud it is required when auto_allocate_floating_ip is selected.
- Field deprecated in 17.1.1.
flow_dist:
description:
- Criteria for flow distribution among ses.
- Enum options - LOAD_AWARE, CONSISTENT_HASH_SOURCE_IP_ADDRESS, CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT.
- Default value when not specified in API or module is interpreted by Avi Controller as LOAD_AWARE.
flow_label_type:
description:
- Criteria for flow labelling.
- Enum options - NO_LABEL, APPLICATION_LABEL, SERVICE_LABEL.
- Default value when not specified in API or module is interpreted by Avi Controller as NO_LABEL.
fqdn:
description:
- Dns resolvable, fully qualified domain name of the virtualservice.
- Only one of 'fqdn' and 'dns_info' configuration is allowed.
host_name_xlate:
description:
- Translate the host name sent to the servers to this value.
- Translate the host name sent from servers back to the value used by the client.
http_policies:
description:
- Http policies applied on the data traffic of the virtual service.
ign_pool_net_reach:
description:
- Ignore pool servers network reachability constraints for virtual service placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
ip_address:
description:
- Ip address of the virtual service.
- Field deprecated in 17.1.1.
ipam_network_subnet:
description:
- Subnet and/or network for allocating virtualservice ip by ipam provider module.
- Field deprecated in 17.1.1.
limit_doser:
description:
- Limit potential dos attackers who exceed max_cps_per_client significantly to a fraction of max_cps_per_client for a while.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
max_cps_per_client:
description:
- Maximum connections per second per client ip.
- Allowed values are 10-1000.
- Special values are 0- 'unlimited'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
microservice_ref:
description:
- Microservice representing the virtual service.
- It is a reference to an object of type microservice.
name:
description:
- Name for the virtual service.
required: true
network_profile_ref:
description:
- Determines network settings such as protocol, tcp or udp, and related options for the protocol.
- It is a reference to an object of type networkprofile.
network_ref:
description:
- Manually override the network on which the virtual service is placed.
- It is a reference to an object of type network.
- Field deprecated in 17.1.1.
network_security_policy_ref:
description:
- Network security policies for the virtual service.
- It is a reference to an object of type networksecuritypolicy.
nsx_securitygroup:
description:
- A list of nsx service groups representing the clients which can access the virtual ip of the virtual service.
- Field introduced in 17.1.1.
version_added: "2.4"
performance_limits:
description:
            - Optional settings that determine performance limits like max connections or bandwidth etc.
pool_group_ref:
description:
- The pool group is an object that contains pools.
- It is a reference to an object of type poolgroup.
pool_ref:
description:
- The pool is an object that contains destination servers and related attributes such as load-balancing and persistence.
- It is a reference to an object of type pool.
port_uuid:
description:
- (internal-use) network port assigned to the virtual service ip address.
- Field deprecated in 17.1.1.
remove_listening_port_on_vs_down:
description:
- Remove listening port if virtualservice is down.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
requests_rate_limit:
description:
- Rate limit the incoming requests to this virtual service.
scaleout_ecmp:
description:
- Disable re-distribution of flows across service engines for a virtual service.
- Enable if the network itself performs flow hashing with ecmp in environments such as gcp.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
se_group_ref:
description:
- The service engine group to use for this virtual service.
- Moving to a new se group is disruptive to existing connections for this vs.
- It is a reference to an object of type serviceenginegroup.
server_network_profile_ref:
description:
- Determines the network settings profile for the server side of tcp proxied connections.
- Leave blank to use the same settings as the client to vs side of the connection.
- It is a reference to an object of type networkprofile.
service_metadata:
description:
- Metadata pertaining to the service provided by this virtual service.
- In openshift/kubernetes environments, egress pod info is stored.
- Any user input to this field will be overwritten by avi vantage.
version_added: "2.4"
service_pool_select:
description:
- Select pool based on destination port.
services:
description:
- List of services defined for this virtual service.
sideband_profile:
description:
- Sideband configuration to be used for this virtualservice.it can be used for sending traffic to sideband vips for external inspection etc.
version_added: "2.4"
snat_ip:
description:
- Nat'ted floating source ip address(es) for upstream connection to servers.
sp_pool_refs:
description:
- Gslb pools used to manage site-persistence functionality.
- Each site-persistence pool contains the virtualservices in all the other sites, that is auto-generated by the gslb manager.
- This is a read-only field for the user.
- It is a reference to an object of type pool.
- Field introduced in 17.2.2.
version_added: "2.5"
ssl_key_and_certificate_refs:
description:
- Select or create one or two certificates, ec and/or rsa, that will be presented to ssl/tls terminated connections.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- Determines the set of ssl versions and ciphers to accept for ssl/tls terminated connections.
- It is a reference to an object of type sslprofile.
ssl_sess_cache_avg_size:
description:
- Expected number of ssl session cache entries (may be exceeded).
- Allowed values are 1024-16383.
- Default value when not specified in API or module is interpreted by Avi Controller as 1024.
static_dns_records:
description:
- List of static dns records applied to this virtual service.
- These are static entries and no health monitoring is performed against the ip addresses.
subnet:
description:
- Subnet providing reachability for client facing virtual service ip.
- Field deprecated in 17.1.1.
subnet_uuid:
description:
- It represents subnet for the virtual service ip address allocation when auto_allocate_ip is true.it is only applicable in openstack or aws cloud.
- This field is required if auto_allocate_ip is true.
- Field deprecated in 17.1.1.
tenant_ref:
description:
- It is a reference to an object of type tenant.
traffic_clone_profile_ref:
description:
- Server network or list of servers for cloning traffic.
- It is a reference to an object of type trafficcloneprofile.
- Field introduced in 17.1.1.
version_added: "2.4"
type:
description:
- Specify if this is a normal virtual service, or if it is the parent or child of an sni-enabled virtual hosted virtual service.
- Enum options - VS_TYPE_NORMAL, VS_TYPE_VH_PARENT, VS_TYPE_VH_CHILD.
- Default value when not specified in API or module is interpreted by Avi Controller as VS_TYPE_NORMAL.
url:
description:
- Avi controller URL of the object.
use_bridge_ip_as_vip:
description:
- Use bridge ip as vip on each host in mesos deployments.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
use_vip_as_snat:
description:
- Use the virtual ip as the snat ip for health monitoring and sending traffic to the backend servers instead of the service engine interface ip.
            - The caveat of enabling this option is that the virtualservice cannot be configured in an active-active ha mode.
- Dns based multi vip solution has to be used for ha & non-disruptive upgrade purposes.
- Field introduced in 17.1.9,17.2.3.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
uuid:
description:
- Uuid of the virtualservice.
vh_domain_name:
description:
- The exact name requested from the client's sni-enabled tls hello domain name field.
- If this is a match, the parent vs will forward the connection to this child vs.
vh_parent_vs_uuid:
description:
- Specifies the virtual service acting as virtual hosting (sni) parent.
vip:
description:
- List of virtual service ips.
- While creating a 'shared vs',please use vsvip_ref to point to the shared entities.
- Field introduced in 17.1.1.
version_added: "2.4"
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
vs_datascripts:
description:
- Datascripts applied on the data traffic of the virtual service.
vsvip_ref:
description:
- Mostly used during the creation of shared vs, this field refers to entities that can be shared across virtual services.
- It is a reference to an object of type vsvip.
- Field introduced in 17.1.1.
version_added: "2.4"
waf_policy_ref:
description:
- Waf policy for the virtual service.
- It is a reference to an object of type wafpolicy.
- Field introduced in 17.2.1.
version_added: "2.5"
weight:
description:
- The quality of service weight to assign to traffic transmitted from this virtual service.
- A higher weight will prioritize traffic versus other virtual services sharing the same service engines.
- Allowed values are 1-128.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create SSL Virtual Service using Pool testpool2
avi_virtualservice:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
name: newtestvs
state: present
performance_limits:
max_concurrent_connections: 1000
services:
- port: 443
enable_ssl: true
- port: 80
ssl_profile_ref: '/api/sslprofile?name=System-Standard'
application_profile_ref: '/api/applicationprofile?name=System-Secure-HTTP'
ssl_key_and_certificate_refs:
- '/api/sslkeyandcertificate?name=System-Default-Cert'
ip_address:
addr: 10.90.131.103
type: V4
pool_ref: '/api/pool?name=testpool2'
"""
RETURN = '''
obj:
description: VirtualService (api/virtualservice) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
active_standby_se_tag=dict(type='str',),
analytics_policy=dict(type='dict',),
analytics_profile_ref=dict(type='str',),
application_profile_ref=dict(type='str',),
auto_allocate_floating_ip=dict(type='bool',),
auto_allocate_ip=dict(type='bool',),
availability_zone=dict(type='str',),
avi_allocated_fip=dict(type='bool',),
avi_allocated_vip=dict(type='bool',),
client_auth=dict(type='dict',),
close_client_conn_on_config_update=dict(type='bool',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
cloud_type=dict(type='str',),
connections_rate_limit=dict(type='dict',),
content_rewrite=dict(type='dict',),
created_by=dict(type='str',),
delay_fairness=dict(type='bool',),
description=dict(type='str',),
discovered_network_ref=dict(type='list',),
discovered_networks=dict(type='list',),
discovered_subnet=dict(type='list',),
dns_info=dict(type='list',),
dns_policies=dict(type='list',),
east_west_placement=dict(type='bool',),
enable_autogw=dict(type='bool',),
enable_rhi=dict(type='bool',),
enable_rhi_snat=dict(type='bool',),
enabled=dict(type='bool',),
error_page_profile_ref=dict(type='str',),
floating_ip=dict(type='dict',),
floating_subnet_uuid=dict(type='str',),
flow_dist=dict(type='str',),
flow_label_type=dict(type='str',),
fqdn=dict(type='str',),
host_name_xlate=dict(type='str',),
http_policies=dict(type='list',),
ign_pool_net_reach=dict(type='bool',),
ip_address=dict(type='dict',),
ipam_network_subnet=dict(type='dict',),
limit_doser=dict(type='bool',),
max_cps_per_client=dict(type='int',),
microservice_ref=dict(type='str',),
name=dict(type='str', required=True),
network_profile_ref=dict(type='str',),
network_ref=dict(type='str',),
network_security_policy_ref=dict(type='str',),
nsx_securitygroup=dict(type='list',),
performance_limits=dict(type='dict',),
pool_group_ref=dict(type='str',),
pool_ref=dict(type='str',),
port_uuid=dict(type='str',),
remove_listening_port_on_vs_down=dict(type='bool',),
requests_rate_limit=dict(type='dict',),
scaleout_ecmp=dict(type='bool',),
se_group_ref=dict(type='str',),
server_network_profile_ref=dict(type='str',),
service_metadata=dict(type='str',),
service_pool_select=dict(type='list',),
services=dict(type='list',),
sideband_profile=dict(type='dict',),
snat_ip=dict(type='list',),
sp_pool_refs=dict(type='list',),
ssl_key_and_certificate_refs=dict(type='list',),
ssl_profile_ref=dict(type='str',),
ssl_sess_cache_avg_size=dict(type='int',),
static_dns_records=dict(type='list',),
subnet=dict(type='dict',),
subnet_uuid=dict(type='str',),
tenant_ref=dict(type='str',),
traffic_clone_profile_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
use_bridge_ip_as_vip=dict(type='bool',),
use_vip_as_snat=dict(type='bool',),
uuid=dict(type='str',),
vh_domain_name=dict(type='list',),
vh_parent_vs_uuid=dict(type='str',),
vip=dict(type='list',),
vrf_context_ref=dict(type='str',),
vs_datascripts=dict(type='list',),
vsvip_ref=dict(type='str',),
waf_policy_ref=dict(type='str',),
weight=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'virtualservice',
set([]))
if __name__ == '__main__':
main()
|
hkariti/ansible
|
lib/ansible/modules/network/avi/avi_virtualservice.py
|
Python
|
gpl-3.0
| 26,779
|
[
"VisIt"
] |
fd05a879056d9a426380c867e3bae2ea39d27f4942483825e9a6c725c8f899b3
|
########################################################################
# File: TracedTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/08/08 15:21:32
########################################################################
""" :mod: TracedTests
=======================
.. module: TracedTests
:synopsis: Traced test cases
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
Traced test cases
"""
## metaclass stuff too smart for pylint
#pylint: disable=no-member
__RCSID__ = "$Id $"
##
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/08/08 15:21:44
## imports
import unittest
## SUT
from DIRAC.Core.Utilities.Traced import Traced, TracedDict, TracedList
########################################################################
class TracedTests(unittest.TestCase):
"""
.. class:: TracedTests
"""
def setUp( self ):
"""c'tor
:param self: self reference
"""
self.tracedDict = TracedDict( { 1 : 1 } )
self.tracedList = TracedList( [ 1 ] )
class TracedClass( object ):
__metaclass__ = Traced
classArg = None
def __init__( self ):
        self.instanceArg = None
self.tracedClass = TracedClass()
  def testTracedDict( self ):
""" TracedDict tests """
self.assertEqual( self.tracedDict.updated(), [] )
## update, not changing value
self.tracedDict[1] = 1
self.assertEqual( self.tracedDict.updated(), [] )
## update, changing value
self.tracedDict[1] = 2
self.assertEqual( self.tracedDict.updated(), [1] )
## set new
self.tracedDict[2] = 2
self.assertEqual( self.tracedDict.updated(), [ 1, 2 ] )
## update from diff dict
self.tracedDict.update( { 3: 3 } )
self.assertEqual( self.tracedDict.updated(), [ 1, 2, 3 ] )
def testTracedList( self ):
""" traced list """
self.assertEqual( self.tracedList.updated(), [] )
## no value change
self.tracedList[0] = 1
self.assertEqual( self.tracedList.updated(), [] )
## value change
self.tracedList[0] = 2
self.assertEqual( self.tracedList.updated(), [0] )
## append
self.tracedList.append( 1 )
self.assertEqual( self.tracedList.updated(), [0, 1] )
def testTracedClass( self ):
""" traced class """
self.assertEqual( self.tracedClass.updated(), [] )
self.tracedClass.instanceArg = 1
self.assertEqual( self.tracedClass.updated(), [ "instanceArg" ] )
self.tracedClass.classArg = 1
self.assertEqual( self.tracedClass.updated(), [ "instanceArg" , "classArg" ] )
## test execution
if __name__ == "__main__":
TESTLOADER = unittest.TestLoader()
SUITE = TESTLOADER.loadTestsFromTestCase( TracedTests )
unittest.TextTestRunner(verbosity=3).run( SUITE )
|
andresailer/DIRAC
|
Core/Utilities/test/Test_Traced.py
|
Python
|
gpl-3.0
| 2,735
|
[
"DIRAC"
] |
c32ba93c0a66b0030c487494c90674da84b58f80d5867a365de4979a69bb71c7
|
# $Id$
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import doctest
import unittest
from rdkit import Chem
from rdkit import rdBase
from rdkit.Chem.Fraggle import FraggleSim
from rdkit.Chem.Fraggle.FraggleSim import select_fragments
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
tests.addTests(
doctest.DocTestSuite(FraggleSim, optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE))
return tests
def _of(smiles):
""" Order the fragments alphabetically. If smiles is None, returns None """
if smiles is None:
return None
return '.'.join(sorted(smiles.split('.')))
class TestCase(unittest.TestCase):
def test_generate_fraggle_fragmentation(self):
mol = Chem.MolFromSmiles('COc1cc(CN2CCC(CC2)NC(=O)c2cncc(C)c2)c(OC)c2ccccc12')
frags = FraggleSim.generate_fraggle_fragmentation(mol)
self.assertEqual(len(frags), 16)
expected = (
'*C(=O)NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'*C(=O)c1cncc(C)c1.*C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'*C(=O)c1cncc(C)c1.*c1cc(OC)c2ccccc2c1OC', '*C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'*C(=O)c1cncc(C)c1.*Cc1cc(OC)c2ccccc2c1OC',
'*Cc1cc(OC)c2ccccc2c1OC.*NC(=O)c1cncc(C)c1', '*Cc1cc(OC)c2ccccc2c1OC.*c1cncc(C)c1',
'*NC(=O)c1cncc(C)c1.*c1cc(OC)c2ccccc2c1OC', '*NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'*NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.*c1cncc(C)c1',
'*c1c(CN2CCC(NC(=O)c3cncc(C)c3)CC2)cc(OC)c2ccccc12',
'*c1c(OC)cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c1*',
'*c1cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c2ccccc12',
'*N1CCC(NC(=O)c2cncc(C)c2)CC1.*c1cc(OC)c2ccccc2c1OC',
'*C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.*c1cncc(C)c1', '*c1cc(OC)c2ccccc2c1OC.*c1cncc(C)c1')
expected = [_of(s) for s in expected]
for smi in frags:
self.assertTrue(_of(smi) in expected)
# Test case for fragments that contain a cyclic and acyclic component
mol = Chem.MolFromSmiles('c12c(CCC)cccc2cccc1')
frags = FraggleSim.generate_fraggle_fragmentation(mol)
expected = ['*CCC.*c1ccccc1*', '*Cc1cccc2ccccc12', '*c1cccc(CCC)c1*',
'*c1cccc2ccccc12', '*c1ccccc1*']
expected = [_of(s) for s in expected]
for smi in frags:
self.assertTrue(_of(smi) in expected)
def testFragmentation2(self):
mol = Chem.MolFromSmiles('COc1cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c2ccccc12')
frags = FraggleSim.generate_fraggle_fragmentation(mol)
self.assertEqual(len(frags), 13)
expected = (
'*C(=O)c1ccccc1.*C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'*C(=O)c1ccccc1.*Cc1cc(OC)c2ccccc2c1OC', '*C(=O)c1ccccc1.*c1cc(OC)c2ccccc2c1OC',
'*C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.*c1ccccc1',
'*Cc1cc(OC)c2ccccc2c1OC.*NC(=O)c1ccccc1', '*Cc1cc(OC)c2ccccc2c1OC.*c1ccccc1',
'*N1CCC(NC(=O)c2ccccc2)CC1.*c1cc(OC)c2ccccc2c1OC',
'*NC(=O)c1ccccc1.*c1cc(OC)c2ccccc2c1OC',
'*NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.*c1ccccc1',
'*c1c(CN2CCC(NC(=O)c3ccccc3)CC2)cc(OC)c2ccccc12',
'*c1c(OC)cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c1*',
'*c1cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c2ccccc12', '*c1cc(OC)c2ccccc2c1OC.*c1ccccc1')
expected = [_of(s) for s in expected]
for smi in frags:
self.assertIn(_of(smi), expected)
def test_select_fragments(self):
self.assertRaises(NotImplementedError, select_fragments, 'CCCC', 'InvalidFragmentationType', 10)
def test_select_fragments_acyclic(self):
# acyclic fragments: returns fragments with more than two heavy atoms
self._testSelectFragment('*C.*O*.*c1cccc2ccccc12', FraggleSim.FTYPE_ACYCLIC, 12,
'*c1cccc2ccccc12')
    # ignore the fragments if they make up less than 60% of the original molecule
self._testSelectFragment('*CC.*O*.*c1ccccc1', FraggleSim.FTYPE_ACYCLIC, 9,
'*c1ccccc1')
self._testSelectFragment('*CC.*OCC*.*c1ccccc1', FraggleSim.FTYPE_ACYCLIC, 11,
'*c1ccccc1')
self._testSelectFragment('*CC.*OCCC*.*c1ccccc1', FraggleSim.FTYPE_ACYCLIC, 12, None)
# ignore fragments that are smaller than 2 atoms
self._testSelectFragment('*C.*O*.*c1ccccc1', FraggleSim.FTYPE_ACYCLIC, 8, '*c1ccccc1')
self._testSelectFragment('*CC.*O*.*c1ccccc1', FraggleSim.FTYPE_ACYCLIC, 9,
'*c1ccccc1')
self._testSelectFragment('*CCC.*O*.*c1ccccc1', FraggleSim.FTYPE_ACYCLIC, 10,
'*CCC.*c1ccccc1')
# Only small fragments
self._testSelectFragment('*CC.*CC.*CC', FraggleSim.FTYPE_ACYCLIC, 6, None)
self._testSelectFragment('*CCC.*CCC.*CCC', FraggleSim.FTYPE_ACYCLIC, 6,
'*CCC.*CCC.*CCC')
def test_select_fragments_cyclic(self):
self._testSelectFragment('*c1ccccc1*.*cccc*', FraggleSim.FTYPE_CYCLIC, 10,
'*c1ccccc1*')
self._testSelectFragment('*c1ccccc1*.*C.*C', FraggleSim.FTYPE_CYCLIC, 8, None)
# Fragment too small
self._testSelectFragment('*c1ccccc1*.*c(CCCCCCCCCC)ccc*', FraggleSim.FTYPE_CYCLIC, 20,
None)
def test_select_fragments_cyclic_acyclic(self):
self._testSelectFragment('*c1ccccc1*.*CCC.*C', FraggleSim.FTYPE_CYCLIC_ACYCLIC, 10,
'*c1ccccc1*.*CCC')
self._testSelectFragment('*CCC.*C.*c1ccccc1*', FraggleSim.FTYPE_CYCLIC_ACYCLIC, 10,
'*CCC.*c1ccccc1*')
self._testSelectFragment('*CCC.*CCCCCCCCCCC*.*c1ccccc1*',
FraggleSim.FTYPE_CYCLIC_ACYCLIC, 25, None)
def _testSelectFragment(self, smiles, fragmentType, numAtoms, expected):
mol = Chem.MolFromSmiles(smiles, sanitize=False)
fragments = Chem.GetMolFrags(mol, asMols=True, sanitizeFrags=False)
self.assertEqual(_of(select_fragments(fragments, fragmentType, numAtoms)), _of(expected))
def test_isValidRingCut(self):
rdBase.DisableLog('rdApp.error')
self.assertEqual(FraggleSim.isValidRingCut(Chem.MolFromSmiles('*CCC*')), False)
self.assertEqual(FraggleSim.isValidRingCut(Chem.MolFromSmiles('*C1CC1*')), True)
self.assertEqual(FraggleSim.isValidRingCut(Chem.MolFromSmiles('*c1ccccc1*')), True)
self.assertEqual(
FraggleSim.isValidRingCut(Chem.MolFromSmiles('*cccc*', sanitize=False)), False)
rdBase.EnableLog('rdApp.error')
def test_GetFraggleSimilarity(self):
q = Chem.MolFromSmiles('COc1cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c2ccccc12')
m = Chem.MolFromSmiles('COc1cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c2ccccc12')
sim, match = FraggleSim.GetFraggleSimilarity(q, m)
self.assertAlmostEqual(sim, 0.980, places=2)
self.assertEqual(match, '*C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1')
m = Chem.MolFromSmiles('COc1cc(CN2CCC(Nc3nc4ccccc4s3)CC2)c(OC)c2ccccc12')
sim, match = FraggleSim.GetFraggleSimilarity(q, m)
self.assertAlmostEqual(sim, 0.794, places=2)
self.assertEqual(match, '*C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1')
q = Chem.MolFromSmiles('COc1ccccc1')
sim, match = FraggleSim.GetFraggleSimilarity(q, m)
self.assertAlmostEqual(sim, 0.347, places=2)
self.assertEqual(match, '*c1ccccc1')
m = Chem.MolFromSmiles('COc1cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c2ccccc12')
sim, match = FraggleSim.GetFraggleSimilarity(q, m)
self.assertAlmostEqual(sim, 0.266, places=2)
self.assertEqual(match, '*c1ccccc1')
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
bp-kelley/rdkit
|
rdkit/Chem/Fraggle/UnitTestFraggle.py
|
Python
|
bsd-3-clause
| 7,513
|
[
"RDKit"
] |
4f4df51365e6507d32689396d69249ad028fed851b9772175153c78bc400d066
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RDP analysis of the Sampled Gaussian Mechanism.
Functionality for computing Renyi differential privacy (RDP) of an additive
Sampled Gaussian Mechanism (SGM). Its public interface consists of two methods:
compute_rdp(q, noise_multiplier, T, orders) computes RDP for SGM iterated
T times.
get_privacy_spent(orders, rdp, target_eps, target_delta) computes delta
(or eps) given RDP at multiple orders and
a target value for eps (or delta).
Example use:
Suppose that we have run an SGM applied to a function with l2-sensitivity 1.
Its parameters are given as a list of tuples (q1, sigma1, T1), ...,
(qk, sigma_k, Tk), and we wish to compute eps for a given delta.
The example code would be:
max_order = 32
orders = range(2, max_order + 1)
rdp = np.zeros_like(orders, dtype=float)
for q, sigma, T in parameters:
rdp += rdp_accountant.compute_rdp(q, sigma, T, orders)
  eps, _, opt_order = rdp_accountant.get_privacy_spent(orders, rdp, target_delta=delta)
"""
import math
import sys
import numpy as np
from scipy import special
########################
# LOG-SPACE ARITHMETIC #
########################
def _log_add(logx, logy):
"""Add two numbers in the log space."""
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf: # adding 0
return b
# Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)
def _log_sub(logx, logy):
"""Subtract two numbers in the log space. Answer must be non-negative."""
if logx < logy:
raise ValueError("The result of subtraction must be non-negative.")
if logy == -np.inf: # subtracting 0
return logx
if logx == logy:
return -np.inf # 0 is represented as -np.inf in the log space.
try:
# Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1
except OverflowError:
return logx
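# Illustrative sanity check of the helpers above: with both inputs at
# log(1) = 0.0, _log_add(0.0, 0.0) == math.log(2), while
# _log_sub(0.0, 0.0) == -np.inf, the log-space representation of zero.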
def _log_sub_sign(logx, logy):
"""Returns log(exp(logx)-exp(logy)) and its sign."""
if logx > logy:
s = True
mag = logx + np.log(1 - np.exp(logy - logx))
elif logx < logy:
s = False
mag = logy + np.log(1 - np.exp(logx - logy))
else:
s = True
mag = -np.inf
return s, mag
def _log_print(logx):
"""Pretty print."""
if logx < math.log(sys.float_info.max):
return "{}".format(math.exp(logx))
else:
return "exp({})".format(logx)
def _log_comb(n, k):
return (special.gammaln(n + 1) - special.gammaln(k + 1) -
special.gammaln(n - k + 1))
def _compute_log_a_int(q, sigma, alpha):
"""Compute log(A_alpha) for integer alpha. 0 < q < 1."""
assert isinstance(alpha, int)
# Initialize with 0 in the log space.
log_a = -np.inf
for i in range(alpha + 1):
log_coef_i = (
_log_comb(alpha, i) + i * math.log(q) + (alpha - i) * math.log(1 - q))
s = log_coef_i + (i * i - i) / (2 * (sigma**2))
log_a = _log_add(log_a, s)
return float(log_a)
def _compute_log_a_frac(q, sigma, alpha):
"""Compute log(A_alpha) for fractional alpha. 0 < q < 1."""
# The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
# initialized to 0 in the log space:
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma**2 * math.log(1 / q - 1) + .5
while True: # do ... until loop
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
def _compute_log_a(q, sigma, alpha):
"""Compute log(A_alpha) for any positive finite alpha."""
if float(alpha).is_integer():
return _compute_log_a_int(q, sigma, int(alpha))
else:
return _compute_log_a_frac(q, sigma, alpha)
def _log_erfc(x):
"""Compute log(erfc(x)) with high accuracy for large x."""
try:
return math.log(2) + special.log_ndtr(-x * 2**.5)
except NameError:
# If log_ndtr is not available, approximate as follows:
r = special.erfc(x)
if r == 0.0:
# Using the Laurent series at infinity for the tail of the erfc function:
# erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
# To verify in Mathematica:
# Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
.625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
else:
return math.log(r)
def _compute_delta(orders, rdp, eps):
"""Compute delta given a list of RDP values and target epsilon.
Args:
orders: An array (or a scalar) of orders.
rdp: A list (or a scalar) of RDP guarantees.
eps: The target epsilon.
Returns:
Pair of (delta, optimal_order).
Raises:
ValueError: If input is malformed.
"""
orders_vec = np.atleast_1d(orders)
rdp_vec = np.atleast_1d(rdp)
if eps < 0:
raise ValueError("Value of privacy loss bound epsilon must be >=0.")
if len(orders_vec) != len(rdp_vec):
raise ValueError("Input lists must have the same length.")
# Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
# delta = min( np.exp((rdp_vec - eps) * (orders_vec - 1)) )
# Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in v4):
logdeltas = [] # work in log space to avoid overflows
for (a, r) in zip(orders_vec, rdp_vec):
if a < 1:
raise ValueError("Renyi divergence order must be >=1.")
if r < 0:
raise ValueError("Renyi divergence must be >=0.")
    # For small alpha, we are better off with the bound via KL divergence:
# delta <= sqrt(1-exp(-KL)).
# Take a min of the two bounds.
logdelta = 0.5 * math.log1p(-math.exp(-r))
if a > 1.01:
# This bound is not numerically stable as alpha->1.
# Thus we have a min value for alpha.
# The bound is also not useful for small alpha, so doesn't matter.
rdp_bound = (a - 1) * (r - eps + math.log1p(-1 / a)) - math.log(a)
logdelta = min(logdelta, rdp_bound)
logdeltas.append(logdelta)
idx_opt = np.argmin(logdeltas)
return min(math.exp(logdeltas[idx_opt]), 1.), orders_vec[idx_opt]
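# In formulas, for each order a with RDP bound r the two candidate bounds
# combined in _compute_delta above are
#   delta <= sqrt(1 - exp(-r))                                 (KL bound)
#   delta <= exp((a - 1)*(r - eps + log(1 - 1/a)) - log(a))    (Prop. 12 bound)
# and the reported delta is the minimum over all supplied orders.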
def _compute_eps(orders, rdp, delta):
"""Compute epsilon given a list of RDP values and target delta.
Args:
orders: An array (or a scalar) of orders.
rdp: A list (or a scalar) of RDP guarantees.
delta: The target delta.
Returns:
Pair of (eps, optimal_order).
Raises:
ValueError: If input is malformed.
"""
orders_vec = np.atleast_1d(orders)
rdp_vec = np.atleast_1d(rdp)
if delta <= 0:
raise ValueError("Privacy failure probability bound delta must be >0.")
if len(orders_vec) != len(rdp_vec):
raise ValueError("Input lists must have the same length.")
# Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
# eps = min( rdp_vec - math.log(delta) / (orders_vec - 1) )
# Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in v4).
# Also appears in https://arxiv.org/abs/2001.05990 Equation 20 (in v1).
eps_vec = []
for (a, r) in zip(orders_vec, rdp_vec):
if a < 1:
raise ValueError("Renyi divergence order must be >=1.")
if r < 0:
raise ValueError("Renyi divergence must be >=0.")
if delta**2 + math.expm1(-r) >= 0:
# In this case, we can simply bound via KL divergence:
# delta <= sqrt(1-exp(-KL)).
eps = 0 # No need to try further computation if we have eps = 0.
elif a > 1.01:
# This bound is not numerically stable as alpha->1.
# Thus we have a min value of alpha.
# The bound is also not useful for small alpha, so doesn't matter.
eps = r + math.log1p(-1 / a) - math.log(delta * a) / (a - 1)
else:
# In this case we can't do anything. E.g., asking for delta = 0.
eps = np.inf
eps_vec.append(eps)
idx_opt = np.argmin(eps_vec)
return max(0, eps_vec[idx_opt]), orders_vec[idx_opt]
def _stable_inplace_diff_in_log(vec, signs, n=-1):
"""Replaces the first n-1 dims of vec with the log of abs difference operator.
Args:
vec: numpy array of floats with size larger than 'n'
signs: Optional numpy array of bools with the same size as vec in case one
needs to compute partial differences vec and signs jointly describe a
vector of real numbers' sign and abs in log scale.
n: Optonal upper bound on number of differences to compute. If negative, all
differences are computed.
Returns:
The first n-1 dimension of vec and signs will store the log-abs and sign of
the difference.
Raises:
ValueError: If input is malformed.
"""
assert vec.shape == signs.shape
if n < 0:
n = np.max(vec.shape) - 1
else:
assert np.max(vec.shape) >= n + 1
for j in range(0, n, 1):
if signs[j] == signs[j + 1]: # When the signs are the same
# if the signs are both positive, then we can just use the standard one
signs[j], vec[j] = _log_sub_sign(vec[j + 1], vec[j])
# otherwise, we do that but toggle the sign
if not signs[j + 1]:
signs[j] = ~signs[j]
else: # When the signs are different.
vec[j] = _log_add(vec[j], vec[j + 1])
signs[j] = signs[j + 1]
def _get_forward_diffs(fun, n):
"""Computes up to nth order forward difference evaluated at 0.
See Theorem 27 of https://arxiv.org/pdf/1808.00087.pdf
Args:
fun: Function to compute forward differences of.
n: Number of differences to compute.
Returns:
Pair (deltas, signs_deltas) of the log deltas and their signs.
"""
func_vec = np.zeros(n + 3)
signs_func_vec = np.ones(n + 3, dtype=bool)
# ith coordinate of deltas stores log(abs(ith order discrete derivative))
deltas = np.zeros(n + 2)
signs_deltas = np.zeros(n + 2, dtype=bool)
for i in range(1, n + 3, 1):
func_vec[i] = fun(1.0 * (i - 1))
for i in range(0, n + 2, 1):
# Diff in log scale
_stable_inplace_diff_in_log(func_vec, signs_func_vec, n=n + 2 - i)
deltas[i] = func_vec[0]
signs_deltas[i] = signs_func_vec[0]
return deltas, signs_deltas
def _compute_rdp(q, sigma, alpha):
"""Compute RDP of the Sampled Gaussian mechanism at order alpha.
Args:
q: The sampling rate.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
if q == 0:
return 0
if q == 1.:
return alpha / (2 * sigma**2)
if np.isinf(alpha):
return np.inf
return _compute_log_a(q, sigma, alpha) / (alpha - 1)
def compute_rdp(q, noise_multiplier, steps, orders):
"""Computes RDP of the Sampled Gaussian Mechanism.
Args:
q: The sampling rate.
noise_multiplier: The ratio of the standard deviation of the Gaussian noise
to the l2-sensitivity of the function to which it is added.
steps: The number of steps.
orders: An array (or a scalar) of RDP orders.
Returns:
The RDPs at all orders. Can be `np.inf`.
"""
if np.isscalar(orders):
rdp = _compute_rdp(q, noise_multiplier, orders)
else:
rdp = np.array(
[_compute_rdp(q, noise_multiplier, order) for order in orders])
return rdp * steps
def compute_rdp_sample_without_replacement(q, noise_multiplier, steps, orders):
"""Compute RDP of Gaussian Mechanism using sampling without replacement.
This function applies to the following schemes:
1. Sampling w/o replacement: Sample a uniformly random subset of size m = q*n.
  2. "Replace one data point" version of differential privacy, i.e., n is
    considered public information.
  Reference: Theorem 27 of https://arxiv.org/pdf/1808.00087.pdf (a strengthened
  version applies to the subsampled Gaussian mechanism)
- Wang, Balle, Kasiviswanathan. "Subsampled Renyi Differential Privacy and
Analytical Moments Accountant." AISTATS'2019.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
noise_multiplier: The ratio of the standard deviation of the Gaussian noise
to the l2-sensitivity of the function to which it is added.
steps: The number of steps.
orders: An array (or a scalar) of RDP orders.
Returns:
The RDPs at all orders, can be np.inf.
"""
if np.isscalar(orders):
rdp = _compute_rdp_sample_without_replacement_scalar(
q, noise_multiplier, orders)
else:
rdp = np.array([
_compute_rdp_sample_without_replacement_scalar(q, noise_multiplier,
order)
for order in orders
])
return rdp * steps
def _compute_rdp_sample_without_replacement_scalar(q, sigma, alpha):
"""Compute RDP of the Sampled Gaussian mechanism at order alpha.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
assert (q <= 1) and (q >= 0) and (alpha >= 1)
if q == 0:
return 0
if q == 1.:
return alpha / (2 * sigma**2)
if np.isinf(alpha):
return np.inf
if float(alpha).is_integer():
return _compute_rdp_sample_without_replacement_int(q, sigma, alpha) / (
alpha - 1)
else:
# When alpha not an integer, we apply Corollary 10 of [WBK19] to interpolate
# the CGF and obtain an upper bound
alpha_f = math.floor(alpha)
alpha_c = math.ceil(alpha)
x = _compute_rdp_sample_without_replacement_int(q, sigma, alpha_f)
y = _compute_rdp_sample_without_replacement_int(q, sigma, alpha_c)
t = alpha - alpha_f
return ((1 - t) * x + t * y) / (alpha - 1)
def _compute_rdp_sample_without_replacement_int(q, sigma, alpha):
"""Compute log(A_alpha) for integer alpha, subsampling without replacement.
When alpha is smaller than max_alpha, compute the bound Theorem 27 exactly,
otherwise compute the bound with Stirling approximation.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
max_alpha = 256
assert isinstance(alpha, int)
if np.isinf(alpha):
return np.inf
elif alpha == 1:
return 0
def cgf(x):
# Return rdp(x+1)*x, the rdp of Gaussian mechanism is alpha/(2*sigma**2)
return x * 1.0 * (x + 1) / (2.0 * sigma**2)
def func(x):
# Return the rdp of Gaussian mechanism
return 1.0 * x / (2.0 * sigma**2)
# Initialize with 1 in the log space.
log_a = 0
# Calculates the log term when alpha = 2
log_f2m1 = func(2.0) + np.log(1 - np.exp(-func(2.0)))
if alpha <= max_alpha:
# We need forward differences of exp(cgf)
# The following line is the numerically stable way of implementing it.
# The output is in polar form with logarithmic magnitude
deltas, _ = _get_forward_diffs(cgf, alpha)
    # Computing the bound exactly requires bookkeeping of O(alpha**2)
for i in range(2, alpha + 1):
if i == 2:
s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
np.log(4) + log_f2m1,
func(2.0) + np.log(2))
elif i > 2:
delta_lo = deltas[int(2 * np.floor(i / 2.0)) - 1]
delta_hi = deltas[int(2 * np.ceil(i / 2.0)) - 1]
s = np.log(4) + 0.5 * (delta_lo + delta_hi)
s = np.minimum(s, np.log(2) + cgf(i - 1))
s += i * np.log(q) + _log_comb(alpha, i)
log_a = _log_add(log_a, s)
return float(log_a)
else:
    # Compute the bound with the Stirling approximation. Everything is O(x) now.
for i in range(2, alpha + 1):
if i == 2:
s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
np.log(4) + log_f2m1,
func(2.0) + np.log(2))
else:
s = np.log(2) + cgf(i - 1) + i * np.log(q) + _log_comb(alpha, i)
log_a = _log_add(log_a, s)
return log_a
def compute_heterogeneous_rdp(sampling_probabilities, noise_multipliers,
steps_list, orders):
"""Computes RDP of Heteregoneous Applications of Sampled Gaussian Mechanisms.
Args:
sampling_probabilities: A list containing the sampling rates.
noise_multipliers: A list containing the noise multipliers: the ratio of the
standard deviation of the Gaussian noise to the l2-sensitivity of the
function to which it is added.
steps_list: A list containing the number of steps at each
`sampling_probability` and `noise_multiplier`.
orders: An array (or a scalar) of RDP orders.
Returns:
The RDPs at all orders. Can be `np.inf`.
"""
assert len(sampling_probabilities) == len(noise_multipliers)
rdp = 0
for q, noise_multiplier, steps in zip(sampling_probabilities,
noise_multipliers, steps_list):
rdp += compute_rdp(q, noise_multiplier, steps, orders)
return rdp
def get_privacy_spent(orders, rdp, target_eps=None, target_delta=None):
"""Computes delta (or eps) for given eps (or delta) from RDP values.
Args:
orders: An array (or a scalar) of RDP orders.
rdp: An array of RDP values. Must be of the same length as the orders list.
target_eps: If not `None`, the epsilon for which we compute the
corresponding delta.
target_delta: If not `None`, the delta for which we compute the
corresponding epsilon. Exactly one of `target_eps` and `target_delta` must
be `None`.
Returns:
A tuple of epsilon, delta, and the optimal order.
Raises:
    ValueError: If neither or both of target_eps and target_delta are specified.
"""
if target_eps is None and target_delta is None:
raise ValueError(
"Exactly one out of eps and delta must be None. (Both are).")
if target_eps is not None and target_delta is not None:
raise ValueError(
"Exactly one out of eps and delta must be None. (None is).")
if target_eps is not None:
delta, opt_order = _compute_delta(orders, rdp, target_eps)
return target_eps, delta, opt_order
else:
eps, opt_order = _compute_eps(orders, rdp, target_delta)
return eps, target_delta, opt_order
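if __name__ == "__main__":
  # Illustrative demo only; not part of the original module. The parameter
  # values below are made up, chosen solely to exercise the public entry
  # points defined above.
  demo_orders = [1.5, 2, 4, 8, 16, 32, 64]
  # Poisson-subsampled Gaussian mechanism, 1000 steps.
  demo_rdp = compute_rdp(
      q=0.01, noise_multiplier=1.1, steps=1000, orders=demo_orders)
  demo_eps, demo_delta, demo_order = get_privacy_spent(
      demo_orders, demo_rdp, target_delta=1e-5)
  print("Poisson-sampled SGM: eps = %.4f at order %s (delta = %g)" %
        (demo_eps, demo_order, demo_delta))
  # The same parameters under sampling without replacement.
  demo_rdp_wor = compute_rdp_sample_without_replacement(
      q=0.01, noise_multiplier=1.1, steps=1000, orders=demo_orders)
  demo_eps_wor, _, demo_order_wor = get_privacy_spent(
      demo_orders, demo_rdp_wor, target_delta=1e-5)
  print("Sampling w/o replacement: eps = %.4f at order %s" %
        (demo_eps_wor, demo_order_wor))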
|
tensorflow/privacy
|
tensorflow_privacy/privacy/analysis/rdp_accountant.py
|
Python
|
apache-2.0
| 19,387
|
[
"Gaussian"
] |
b93d3ab1494468627e4fa704d36a928f48056846c707e9ef007dd8b2dcd85567
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
def test_npar():
from collections import namedtuple
from pickle import loads, dumps
from pylada.vasp.incar._params import Npar
Comm = namedtuple('Comm', ['n'])
    # Npar(0) should emit no NPAR line; explicit and derived values follow
assert Npar(0).incar_string(comm=Comm(16)) is None
assert Npar(1).incar_string(comm=Comm(16)) == "NPAR = 1"
assert Npar(2).incar_string(comm=Comm(16)) == "NPAR = 2"
assert Npar('power of two').incar_string(comm=Comm(16)) == "NPAR = 4"
assert Npar('power of two').incar_string(comm=Comm(17)) is None
assert Npar('power of two').incar_string(comm=Comm(2)) == "NPAR = 1"
assert Npar('sqrt').incar_string(comm=Comm(16)) == "NPAR = 4"
assert repr(Npar(1)) == "Npar(1)"
assert repr(loads(dumps(Npar(2)))) == "Npar(2)"
assert repr(loads(dumps(Npar("power of two")))) == "Npar('power of two')"
|
pylada/pylada-light
|
tests/vasp/incar/test_npar.py
|
Python
|
gpl-3.0
| 1,976
|
[
"CRYSTAL",
"VASP"
] |
07354f8285d5ea967d7a02bcf00541c060a7b6677b4eff844c70c8117d2d900b
|
from setuptools import setup, find_packages
version = '1.1.0'
setup(name='pyrabbit',
version=version,
description="A Pythonic interface to the RabbitMQ Management HTTP API",
long_description="""\
The main documentation lives at http://pyrabbit.readthedocs.org
There's no easy way to write programs against RabbitMQ's management API
without resorting to some messy urllib boilerplate code involving HTTP
Basic authentication and parsing the JSON responses, etc. Pyrabbit
abstracts this away & provides an intuitive, easy way to work with the
data that lives inside of RabbitMQ, and manipulate the resources there.""",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='python http amqp rabbit rabbitmq management',
      install_requires=['requests'],
author='Brian K. Jones',
author_email='bkjones@gmail.com',
url='http://www.github.com/bkjones/pyrabbit',
download_url='http://www.github.com/bkjones/pyrabbit',
license='MIT',
packages=find_packages(exclude='tests'),
include_package_data=False,
zip_safe=False
)
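# Illustrative usage sketch (comments only, so nothing executes at install
# time). Client, its host:port argument, and the guest credentials follow
# pyrabbit's documented conventions, but treat the exact values as assumptions:
#
#   from pyrabbit.api import Client
#   cl = Client('localhost:15672', 'guest', 'guest')
#   if cl.is_alive():
#       for queue in cl.get_queues():
#           print(queue['name'])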
|
bkjones/pyrabbit
|
setup.py
|
Python
|
bsd-3-clause
| 1,641
|
[
"Brian"
] |
e23f2a3e025018b9aaf6123ef1352e3a1e57d30ed5a7d52a28a876ffb005419e
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.763742
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/ajax/eventdescription.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class eventdescription(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(eventdescription, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<p>
\t''')
_v = VFFSL(SL,"description",True) # u'$description' on line 2, col 2
if _v is not None: write(_filter(_v, rawExpr=u'$description')) # from line 2, col 2.
write(u'''
</p>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_eventdescription= 'respond'
## END CLASS DEFINITION
if not hasattr(eventdescription, '_initCheetahAttributes'):
templateAPIClass = getattr(eventdescription, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(eventdescription)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=eventdescription()).run()
|
MOA-2011/e2openplugin-OpenWebif
|
plugin/controllers/views/ajax/eventdescription.py
|
Python
|
gpl-2.0
| 4,498
|
[
"VisIt"
] |
480255f7adc3071ec3cad832e62be941a4e58ac945ba568829c828eaa568313f
|
"""
Spatial Error Models module
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, \
Daniel Arribas-Bel darribas@asu.edu, \
Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
from numpy import linalg as la
import ols as OLS
from pysal import lag_spatial
from utils import power_expansion, set_endog, iter_msg, sp_att
from utils import get_A1_hom, get_A2_hom, get_A1_het, optim_moments, get_spFilter, get_lags, _moments2eqs
from utils import spdot, RegressionPropsY, set_warn
import twosls as TSLS
import user_output as USER
import summary_output as SUMMARY
__all__ = ["GM_Error", "GM_Endog_Error", "GM_Combo"]
class BaseGM_Error(RegressionPropsY):
"""
GMM method for a spatial error model (note: no consistency checks
diagnostics or constant added); based on Kelejian and Prucha
(1998, 1999) [Kelejian1998]_ [Kelejian1999]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
Examples
--------
>>> import pysal
>>> import numpy as np
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array([dbf.by_col('HOVAL')]).T
>>> x = np.array([dbf.by_col('INC'), dbf.by_col('CRIME')]).T
>>> x = np.hstack((np.ones(y.shape),x))
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
>>> w.transform='r'
>>> model = BaseGM_Error(y, x, w=w.sparse)
>>> np.around(model.betas, decimals=4)
array([[ 47.6946],
[ 0.7105],
[ -0.5505],
[ 0.3257]])
"""
def __init__(self, y, x, w):
# 1a. OLS --> \tilde{betas}
ols = OLS.BaseOLS(y=y, x=x)
self.n, self.k = ols.x.shape
self.x = ols.x
self.y = ols.y
# 1b. GMM --> \tilde{\lambda1}
moments = _momentsGM_Error(w, ols.u)
lambda1 = optim_moments(moments)
# 2a. OLS -->\hat{betas}
xs = get_spFilter(w, lambda1, self.x)
ys = get_spFilter(w, lambda1, self.y)
ols2 = OLS.BaseOLS(y=ys, x=xs)
# Output
self.predy = spdot(self.x, ols2.betas)
self.u = y - self.predy
self.betas = np.vstack((ols2.betas, np.array([[lambda1]])))
self.sig2 = ols2.sig2n
self.e_filtered = self.u - lambda1 * w * self.u
self.vm = self.sig2 * ols2.xtxi
se_betas = np.sqrt(self.vm.diagonal())
self._cache = {}
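# Note on the two-step estimator implemented above: step 1 runs OLS and feeds
# the residuals into the GMM moment conditions to obtain a point estimate of
# lambda; step 2 spatially filters both sides, ys = y - lambda*W*y and
# xs = x - lambda*W*x (get_spFilter), and reruns OLS on the filtered variables
# to obtain the final betas.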
class GM_Error(BaseGM_Error):
"""
GMM method for a spatial error model, with results and diagnostics; based
on Kelejian and Prucha (1998, 1999) [Kelejian1998]_ [Kelejian1999]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : pysal W object
Spatial weights object (always needed)
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import pysal
>>> import numpy as np
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
    a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array([dbf.by_col('HOVAL')]).T
Extract CRIME (crime) and INC (income) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> names_to_extract = ['INC', 'CRIME']
>>> x = np.array([dbf.by_col(name) for name in names_to_extract]).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will use
``columbus.gal``, which contains contiguity relationships between the
observations in the Columbus dataset we are using throughout this example.
Note that, in order to read the file, not only to open it, we need to
append '.read()' at the end of the command.
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
    things, this allows one to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform='r'
    We are all set with the preliminaries; we are ready to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = GM_Error(y, x, w=w, name_y='hoval', name_x=['income', 'crime'], name_ds='columbus')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. Note that because we are running the classical GMM error
model from 1998/99, the spatial parameter is obtained as a point estimate, so
    although you get a value for it (there are four coefficients under
model.betas), you cannot perform inference on it (there are only three
values in model.se_betas).
>>> print model.name_x
['CONSTANT', 'income', 'crime', 'lambda']
>>> np.around(model.betas, decimals=4)
array([[ 47.6946],
[ 0.7105],
[ -0.5505],
[ 0.3257]])
>>> np.around(model.std_err, decimals=4)
array([ 12.412 , 0.5044, 0.1785])
>>> np.around(model.z_stat, decimals=6) #doctest: +SKIP
array([[ 3.84261100e+00, 1.22000000e-04],
[ 1.40839200e+00, 1.59015000e-01],
[ -3.08424700e+00, 2.04100000e-03]])
>>> round(model.sig2,4)
198.5596
"""
def __init__(self, y, x, w,
vm=False, name_y=None, name_x=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
BaseGM_Error.__init__(self, y=y, x=x_constant, w=w.sparse)
self.title = "SPATIALLY WEIGHTED LEAST SQUARES"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_x.append('lambda')
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Error(reg=self, w=w, vm=vm)
class BaseGM_Endog_Error(RegressionPropsY):
'''
GMM method for a spatial error model with endogenous variables (note: no
consistency checks, diagnostics or constant added); based on Kelejian and
Prucha (1998, 1999) [Kelejian1998]_ [Kelejian1999]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : Sparse matrix
Spatial weights sparse matrix
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
z : array
nxk array of variables (combination of x and yend)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
Examples
--------
>>> import pysal
>>> import numpy as np
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array([dbf.by_col('CRIME')]).T
>>> x = np.array([dbf.by_col('INC')]).T
>>> x = np.hstack((np.ones(y.shape),x))
>>> yend = np.array([dbf.by_col('HOVAL')]).T
>>> q = np.array([dbf.by_col('DISCBD')]).T
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
>>> w.transform='r'
>>> model = BaseGM_Endog_Error(y, x, yend, q, w=w.sparse)
>>> np.around(model.betas, decimals=4)
array([[ 82.573 ],
[ 0.581 ],
[ -1.4481],
[ 0.3499]])
'''
def __init__(self, y, x, yend, q, w):
# 1a. TSLS --> \tilde{betas}
tsls = TSLS.BaseTSLS(y=y, x=x, yend=yend, q=q)
self.n, self.k = tsls.z.shape
self.x = tsls.x
self.y = tsls.y
self.yend, self.z = tsls.yend, tsls.z
# 1b. GMM --> \tilde{\lambda1}
moments = _momentsGM_Error(w, tsls.u)
lambda1 = optim_moments(moments)
# 2a. 2SLS -->\hat{betas}
xs = get_spFilter(w, lambda1, self.x)
ys = get_spFilter(w, lambda1, self.y)
yend_s = get_spFilter(w, lambda1, self.yend)
tsls2 = TSLS.BaseTSLS(ys, xs, yend_s, h=tsls.h)
# Output
self.betas = np.vstack((tsls2.betas, np.array([[lambda1]])))
self.predy = spdot(tsls.z, tsls2.betas)
self.u = y - self.predy
self.sig2 = float(np.dot(tsls2.u.T, tsls2.u)) / self.n
self.e_filtered = self.u - lambda1 * w * self.u
self.vm = self.sig2 * tsls2.varb
self._cache = {}
class GM_Endog_Error(BaseGM_Endog_Error):
'''
GMM method for a spatial error model with endogenous variables, with
results and diagnostics; based on Kelejian and Prucha (1998,
1999) [Kelejian1998]_ [Kelejian1999]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object (always needed)
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
z : array
nxk array of variables (combination of x and yend)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
                   Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import pysal
>>> import numpy as np
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> dbf = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Extract the CRIME column (crime rates) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
    a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array([dbf.by_col('CRIME')]).T
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in.
>>> x = np.array([dbf.by_col('INC')]).T
    In this case we consider HOVAL (home value) to be an endogenous regressor.
We tell the model that this is so by passing it in a different parameter
from the exogenous variables (x).
>>> yend = np.array([dbf.by_col('HOVAL')]).T
Because we have endogenous variables, to obtain a correct estimate of the
model, we need to instrument for HOVAL. We use DISCBD (distance to the
CBD) for this and hence put it in the instruments parameter, 'q'.
>>> q = np.array([dbf.by_col('DISCBD')]).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will use
``columbus.gal``, which contains contiguity relationships between the
observations in the Columbus dataset we are using throughout this example.
Note that, in order to read the file, not only to open it, we need to
append '.read()' at the end of the command.
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
    things, this allows one to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform='r'
    We are all set with the preliminaries; we are ready to run the model. In this
case, we will need the variables (exogenous and endogenous), the
instruments and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = GM_Endog_Error(y, x, yend, q, w=w, name_x=['inc'], name_y='crime', name_yend=['hoval'], name_q=['discbd'], name_ds='columbus')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. Note that because we are running the classical GMM error
model from 1998/99, the spatial parameter is obtained as a point estimate, so
    although you get a value for it (there are four coefficients under
model.betas), you cannot perform inference on it (there are only three
values in model.se_betas). Also, this regression uses a two stage least
squares estimation method that accounts for the endogeneity created by the
endogenous variables included.
>>> print model.name_z
['CONSTANT', 'inc', 'hoval', 'lambda']
>>> np.around(model.betas, decimals=4)
array([[ 82.573 ],
[ 0.581 ],
[ -1.4481],
[ 0.3499]])
>>> np.around(model.std_err, decimals=4)
array([ 16.1381, 1.3545, 0.7862])
'''
def __init__(self, y, x, yend, q, w,
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x, yend, q)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
BaseGM_Endog_Error.__init__(
self, y=y, x=x_constant, w=w.sparse, yend=yend, q=q)
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_yend = USER.set_name_yend(name_yend, yend)
self.name_z = self.name_x + self.name_yend
self.name_z.append('lambda')
self.name_q = USER.set_name_q(name_q, q)
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Endog_Error(reg=self, w=w, vm=vm)
class BaseGM_Combo(BaseGM_Endog_Error):
"""
GMM method for a spatial lag and error model, with endogenous variables
(note: no consistency checks, diagnostics or constant added); based on
Kelejian and Prucha (1998, 1999) [Kelejian1998]_ [Kelejian1999]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : Sparse matrix
Spatial weights sparse matrix
w_lags : integer
Orders of W to include as instruments for the spatially
                   lagged dependent variable. For example, if w_lags=1, the
                   instruments are WX; if w_lags=2, WX and WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
z : array
nxk array of variables (combination of x and yend)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
Examples
--------
>>> import numpy as np
>>> import pysal
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> w.transform = 'r'
>>> w_lags = 1
>>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, None, None, w_lags, True)
>>> X = np.hstack((np.ones(y.shape),X))
Example only with spatial lag
>>> reg = BaseGM_Combo(y, X, yend=yd2, q=q2, w=w.sparse)
Print the betas
>>> print np.around(np.hstack((reg.betas[:-1],np.sqrt(reg.vm.diagonal()).reshape(3,1))),3)
[[ 39.059 11.86 ]
[ -1.404 0.391]
[ 0.467 0.2 ]]
And lambda
    >>> print 'Lambda: ', np.around(reg.betas[-1], 3)
    Lambda: [-0.048]
Example with both spatial lag and other endogenous variables
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
>>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, yd, q, w_lags, True)
>>> X = np.hstack((np.ones(y.shape),X))
>>> reg = BaseGM_Combo(y, X, yd2, q2, w=w.sparse)
>>> betas = np.array([['CONSTANT'],['INC'],['HOVAL'],['W_CRIME']])
>>> print np.hstack((betas, np.around(np.hstack((reg.betas[:-1], np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)))
[['CONSTANT' '50.0944' '14.3593']
['INC' '-0.2552' '0.5667']
['HOVAL' '-0.6885' '0.3029']
['W_CRIME' '0.4375' '0.2314']]
"""
def __init__(self, y, x, yend=None, q=None,
w=None, w_lags=1, lag_q=True):
BaseGM_Endog_Error.__init__(self, y=y, x=x, w=w, yend=yend, q=q)
class GM_Combo(BaseGM_Combo):
"""
GMM method for a spatial lag and error model with endogenous variables,
with results and diagnostics; based on Kelejian and Prucha (1998,
1999) [Kelejian1998]_ [Kelejian1999]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object (always needed)
w_lags : integer
Orders of W to include as instruments for the spatially
lagged dependent variable. For example, if w_lags=1, then the
instruments are WX; if w_lags=2, then WX and WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
e_pred : array
nx1 array of residuals (using reduced form)
predy : array
nx1 array of predicted y values
predy_e : array
nx1 array of predicted y values (using reduced form)
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
z : array
nxk array of variables (combination of x and yend)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
pr2_e : float
Pseudo R squared (squared correlation between y and ypred_e
(using reduced form))
sig2 : float
Sigma squared used in computations (based on filtered
residuals)
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Extract the CRIME column (crime rates) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1), as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Extract the INC (income) vector from the DBF to be used as an
independent variable in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
Since we want to run a spatial combo (lag+error) model, we need to specify
the spatial weights matrix that ties the spatial configuration of the
observations into both the lag and error components of the model. To do
that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows us to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
The Combo class runs a SARAR model, that is, a spatial lag+error model.
In this case we will run a simple version of that, where we have the
spatial effects as well as exogenous variables. Since it is a spatial
model, we have to pass in the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Combo(y, X, w=w, name_y='crime', name_x=['income'], name_ds='columbus')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. Note that because we are running the classical GMM error
model from 1998/99, the spatial parameter is obtained as a point estimate, so
although you get a value for it (there are four coefficients under
model.betas), you cannot perform inference on it (there are only three
values in model.se_betas). Also, this regression uses a two stage least
squares estimation method that accounts for the endogeneity created by the
spatial lag of the dependent variable. We can check the betas:
>>> print reg.name_z
['CONSTANT', 'income', 'W_crime', 'lambda']
>>> print np.around(np.hstack((reg.betas[:-1],np.sqrt(reg.vm.diagonal()).reshape(3,1))),3)
[[ 39.059 11.86 ]
[ -1.404 0.391]
[ 0.467 0.2 ]]
And lambda:
>>> print 'lambda: ', np.around(reg.betas[-1], 3)
lambda: [-0.048]
This class also allows the user to run a spatial lag+error model with the
extra feature of including non-spatial endogenous regressors. This means
that, in addition to the spatial lag and error, we consider some of the
variables on the right-hand side of the equation as endogenous and we
instrument for this. As an example, we will include HOVAL (home value) as
endogenous and will instrument with DISCBD (distance to the CSB). We first
need to read in the variables:
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
And then we can run and explore the model analogously to the previous combo:
>>> reg = GM_Combo(y, X, yd, q, w=w, name_x=['inc'], name_y='crime', name_yend=['hoval'], name_q=['discbd'], name_ds='columbus')
>>> print reg.name_z
['CONSTANT', 'inc', 'hoval', 'W_crime', 'lambda']
>>> names = np.array(reg.name_z).reshape(5,1)
>>> print np.hstack((names[0:4,:], np.around(np.hstack((reg.betas[:-1], np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)))
[['CONSTANT' '50.0944' '14.3593']
['inc' '-0.2552' '0.5667']
['hoval' '-0.6885' '0.3029']
['W_crime' '0.4375' '0.2314']]
>>> print 'lambda: ', np.around(reg.betas[-1], 3)
lambda: [ 0.254]
"""
def __init__(self, y, x, yend=None, q=None,
w=None, w_lags=1, lag_q=True,
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x, yend, q)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
yend2, q2 = set_endog(y, x, w, yend, q, w_lags, lag_q)
x_constant = USER.check_constant(x)
BaseGM_Combo.__init__(
self, y=y, x=x_constant, w=w.sparse, yend=yend2, q=q2,
w_lags=w_lags, lag_q=lag_q)
self.rho = self.betas[-2]
self.predy_e, self.e_pred, warn = sp_att(w, self.y,
self.predy, yend2[:, -1].reshape(self.n, 1), self.rho)
set_warn(self, warn)
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_yend = USER.set_name_yend(name_yend, yend)
self.name_yend.append(USER.set_name_yend_sp(self.name_y))
self.name_z = self.name_x + self.name_yend
self.name_z.append('lambda')
self.name_q = USER.set_name_q(name_q, q)
self.name_q.extend(
USER.set_name_q_sp(self.name_x, w_lags, self.name_q, lag_q))
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Combo(reg=self, w=w, vm=vm)
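# Hedged sketch (illustrative helper, not part of pysal's API): this mirrors
# the doctests above to show what set_endog contributes to the combo model.
# With one X column and w_lags=1, the spatial lag Wy enters as an extra
# endogenous column (yd2) and WX serves as its instrument (q2).
def _example_combo_instruments():
    import numpy as np
    import pysal
    db = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
    y = np.array(db.by_col("CRIME")).reshape(49, 1)
    X = np.array([db.by_col("INC")]).T
    w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
    w.transform = 'r'
    yd2, q2 = set_endog(y, X, w, None, None, 1, True)
    assert yd2.shape == (49, 1)  # Wy: spatially lagged dependent variable
    assert q2.shape == (49, 1)   # WX: instrument for Wy (w_lags=1, one column)
    return yd2, q2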
def _momentsGM_Error(w, u):
try:
wsparse = w.sparse
except AttributeError:
# w is already a scipy sparse matrix rather than a pysal W object
wsparse = w
n = wsparse.shape[0]
u2 = np.dot(u.T, u)
wu = wsparse * u
uwu = np.dot(u.T, wu)
wu2 = np.dot(wu.T, wu)
wwu = wsparse * wu
uwwu = np.dot(u.T, wwu)
wwu2 = np.dot(wwu.T, wwu)
wuwwu = np.dot(wu.T, wwu)
wtw = wsparse.T * wsparse
trWtW = np.sum(wtw.diagonal())
g = np.array([[u2[0][0], wu2[0][0], uwu[0][0]]]).T / n
G = np.array(
[[2 * uwu[0][0], -wu2[0][0], n], [2 * wuwwu[0][0], -wwu2[0][0], trWtW],
[uwwu[0][0] + wu2[0][0], -wuwwu[0][0], 0.]]) / n
return [G, g]
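# Hedged usage sketch for _momentsGM_Error (illustrative only; this private
# helper is not part of the public pysal API, and the lattice/residuals here
# are stand-ins). It shows the shape of the Kelejian-Prucha moment system
# assembled above: a 3x3 matrix G and a 3x1 vector g built from the
# residuals u and the weights w.
def _example_momentsGM_Error():
    import numpy as np
    import pysal
    w = pysal.lat2W(5, 5)        # small regular lattice: 25 observations
    u = np.random.randn(25, 1)   # stand-in nx1 residual vector
    G, g = _momentsGM_Error(w, u)
    assert G.shape == (3, 3) and g.shape == (3, 1)
    return G, g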
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import pysal
import numpy as np
dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
y = np.array([dbf.by_col('HOVAL')]).T
names_to_extract = ['INC', 'CRIME']
x = np.array([dbf.by_col(name) for name in names_to_extract]).T
w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
w.transform = 'r'
model = GM_Error(y, x, w, name_y='hoval',
name_x=['income', 'crime'], name_ds='columbus')
print model.summary
|
TaylorOshan/pysal
|
pysal/spreg/error_sp.py
|
Python
|
bsd-3-clause
| 43,203
|
[
"COLUMBUS"
] |
6389fbec2f9234f74fdd79c6ac9d392c11e2f7a820f3d3271055620bf4ae2ad7
|
from cd_perf_promotion.engines.argumentengine import ArgumentEngine
from cd_perf_promotion.engines.configengine import ConfigEngine
from cd_perf_promotion.engines.dataengine import DataEngine
from cd_perf_promotion.engines.comparisonengine import ComparisonEngine
from cd_perf_promotion.engines.outputengine import OutputEngine
def main():
"""
Main function
Prints an introduction statement and starts the comparison engine
"""
# Print the introduction message
print("\n####################################################################\n"
"Continuous Delivery Performance Promotion Tool\n"
"CDK Global, LLC\n"
"####################################################################\n")
arguments = ArgumentEngine().process_arguments()
# Grab the configuration information
configengine = ConfigEngine("config.json", arguments['lr'], arguments['ll'], arguments['blzkey'], arguments['blztest'],
arguments['appduser'], arguments['appdpass'], arguments['appdapp'],
arguments['wpgtkey'])
config_data = configengine.process_config()
# Grab the performance data
dataengine = DataEngine()
perf_data = dataengine.get_data(config_data)
# Begin evaluating the build
comparisonengine = ComparisonEngine()
evaluation = comparisonengine.process_data(config_data, perf_data)
# Output the data
outputengine = OutputEngine()
outputengine.release_judgement(evaluation, arguments['oc'], config_data["elastic_kibana"])
if __name__ == '__main__':
main()
|
CDKGlobal/cd-performance-promotion
|
cd_perf_promotion/main.py
|
Python
|
mit
| 1,639
|
[
"CDK"
] |
6c7c14a98ecd15051d9fdaa84fd8605fbada39f9a697d1724d4a97b7f86ff0f4
|
"""Functions to process IPython magics with."""
from functools import lru_cache
import dataclasses
import ast
from typing import Dict, List, Tuple, Optional
import secrets
import sys
import collections
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard
from black.report import NothingChanged
from black.output import out
TRANSFORMED_MAGICS = frozenset(
(
"get_ipython().run_cell_magic",
"get_ipython().system",
"get_ipython().getoutput",
"get_ipython().run_line_magic",
)
)
TOKENS_TO_IGNORE = frozenset(
(
"ENDMARKER",
"NL",
"NEWLINE",
"COMMENT",
"DEDENT",
"UNIMPORTANT_WS",
"ESCAPED_NL",
)
)
PYTHON_CELL_MAGICS = frozenset(
(
"capture",
"prun",
"pypy",
"python",
"python3",
"time",
"timeit",
)
)
TOKEN_HEX = secrets.token_hex
@dataclasses.dataclass(frozen=True)
class Replacement:
mask: str
src: str
@lru_cache()
def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool:
try:
import IPython # noqa:F401
import tokenize_rt # noqa:F401
except ModuleNotFoundError:
if verbose or not quiet:
msg = (
"Skipping .ipynb files as Jupyter dependencies are not installed.\n"
"You can fix this by running ``pip install black[jupyter]``"
)
out(msg)
return False
else:
return True
def remove_trailing_semicolon(src: str) -> Tuple[str, bool]:
"""Remove trailing semicolon from Jupyter notebook cell.
For example,
fig, ax = plt.subplots()
ax.plot(x_data, y_data); # plot data
would become
fig, ax = plt.subplots()
ax.plot(x_data, y_data) # plot data
Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
``tokenize_rt`` so that round-tripping works fine.
"""
from tokenize_rt import (
src_to_tokens,
tokens_to_src,
reversed_enumerate,
)
tokens = src_to_tokens(src)
trailing_semicolon = False
for idx, token in reversed_enumerate(tokens):
if token.name in TOKENS_TO_IGNORE:
continue
if token.name == "OP" and token.src == ";":
del tokens[idx]
trailing_semicolon = True
break
if not trailing_semicolon:
return src, False
return tokens_to_src(tokens), True
def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
"""Put trailing semicolon back if cell originally had it.
Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
``tokenize_rt`` so that round-tripping works fine.
"""
if not has_trailing_semicolon:
return src
from tokenize_rt import src_to_tokens, tokens_to_src, reversed_enumerate
tokens = src_to_tokens(src)
for idx, token in reversed_enumerate(tokens):
if token.name in TOKENS_TO_IGNORE:
continue
tokens[idx] = token._replace(src=token.src + ";")
break
else: # pragma: nocover
raise AssertionError(
"INTERNAL ERROR: Was not able to reinstate trailing semicolon. "
"Please report a bug on https://github.com/psf/black/issues. "
) from None
return str(tokens_to_src(tokens))
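# Hedged round-trip sketch (demo helper, not part of black): stripping a
# trailing semicolon and putting it back should be lossless, which is the
# invariant the two functions above rely on when formatting notebook cells.
def _example_semicolon_round_trip() -> str:
    src = "plt.plot(x);"
    stripped, had_semicolon = remove_trailing_semicolon(src)
    assert stripped == "plt.plot(x)" and had_semicolon
    restored = put_trailing_semicolon_back(stripped, had_semicolon)
    assert restored == src
    return restored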
def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
"""Mask IPython magics so content becomes parseable Python code.
For example,
%matplotlib inline
'foo'
becomes
"25716f358c32750e"
'foo'
The replacements are returned, along with the transformed code.
"""
replacements: List[Replacement] = []
try:
ast.parse(src)
except SyntaxError:
# Might have IPython magics, will process below.
pass
else:
# Syntax is fine, nothing to mask, early return.
return src, replacements
from IPython.core.inputtransformer2 import TransformerManager
transformer_manager = TransformerManager()
transformed = transformer_manager.transform_cell(src)
transformed, cell_magic_replacements = replace_cell_magics(transformed)
replacements += cell_magic_replacements
transformed = transformer_manager.transform_cell(transformed)
transformed, magic_replacements = replace_magics(transformed)
if len(transformed.splitlines()) != len(src.splitlines()):
# Multi-line magic, not supported.
raise NothingChanged
replacements += magic_replacements
return transformed, replacements
def get_token(src: str, magic: str) -> str:
"""Return randomly generated token to mask IPython magic with.
For example, if 'magic' was `%matplotlib inline`, then a possible
token to mask it with would be `"43fdd17f7e5ddc83"`. The token
will be the same length as the magic, and we make sure that it was
not already present anywhere else in the cell.
"""
assert magic
nbytes = max(len(magic) // 2 - 1, 1)
token = TOKEN_HEX(nbytes)
counter = 0
while token in src:
token = TOKEN_HEX(nbytes)
counter += 1
if counter > 100:
raise AssertionError(
"INTERNAL ERROR: Black was not able to replace IPython magic. "
"Please report a bug on https://github.com/psf/black/issues. "
f"The magic might be helpful: {magic}"
) from None
if len(token) + 2 < len(magic):
token = f"{token}."
return f'"{token}"'
def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
"""Replace cell magic with token.
Note that 'src' will already have been processed by IPython's
TransformerManager().transform_cell.
For example,
get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\\n')
becomes
"a794."
ls =!ls
The replacement, along with the transformed code, is returned.
"""
replacements: List[Replacement] = []
tree = ast.parse(src)
cell_magic_finder = CellMagicFinder()
cell_magic_finder.visit(tree)
if cell_magic_finder.cell_magic is None:
return src, replacements
header = cell_magic_finder.cell_magic.header
mask = get_token(src, header)
replacements.append(Replacement(mask=mask, src=header))
return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements
def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
"""Replace magics within body of cell.
Note that 'src' will already have been processed by IPython's
TransformerManager().transform_cell.
For example, this
get_ipython().run_line_magic('matplotlib', 'inline')
'foo'
becomes
"5e67db56d490fd39"
'foo'
The replacements, along with the transformed code, are returned.
"""
replacements = []
magic_finder = MagicFinder()
magic_finder.visit(ast.parse(src))
new_srcs = []
for i, line in enumerate(src.splitlines(), start=1):
if i in magic_finder.magics:
offsets_and_magics = magic_finder.magics[i]
if len(offsets_and_magics) != 1: # pragma: nocover
raise AssertionError(
f"Expecting one magic per line, got: {offsets_and_magics}\n"
"Please report a bug on https://github.com/psf/black/issues."
)
col_offset, magic = (
offsets_and_magics[0].col_offset,
offsets_and_magics[0].magic,
)
mask = get_token(src, magic)
replacements.append(Replacement(mask=mask, src=magic))
line = line[:col_offset] + mask
new_srcs.append(line)
return "\n".join(new_srcs), replacements
def unmask_cell(src: str, replacements: List[Replacement]) -> str:
"""Remove replacements from cell.
For example
"9b20"
foo = bar
becomes
%%time
foo = bar
"""
for replacement in replacements:
src = src.replace(replacement.mask, replacement.src)
return src
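# Hedged sketch (demo helper, not part of black): unmask_cell substitutes
# each recorded mask back verbatim, undoing what mask_cell produced.
def _example_unmask_cell() -> str:
    replacements = [Replacement(mask='"deadbeef"', src="%%time")]
    masked = '"deadbeef"\nfoo = bar'
    assert unmask_cell(masked, replacements) == "%%time\nfoo = bar"
    return unmask_cell(masked, replacements)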
def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
"""Check if attribute is IPython magic.
Note that the source of the abstract syntax tree
will already have been processed by IPython's
TransformerManager().transform_cell.
"""
return (
isinstance(node, ast.Attribute)
and isinstance(node.value, ast.Call)
and isinstance(node.value.func, ast.Name)
and node.value.func.id == "get_ipython"
)
def _get_str_args(args: List[ast.expr]) -> List[str]:
str_args = []
for arg in args:
assert isinstance(arg, ast.Str)
str_args.append(arg.s)
return str_args
@dataclasses.dataclass(frozen=True)
class CellMagic:
name: str
params: Optional[str]
body: str
@property
def header(self) -> str:
if self.params:
return f"%%{self.name} {self.params}"
return f"%%{self.name}"
# ast.NodeVisitor + dataclass = breakage under mypyc.
class CellMagicFinder(ast.NodeVisitor):
"""Find cell magics.
Note that the source of the abstract syntax tree
will already have been processed by IPython's
TransformerManager().transform_cell.
For example,
%%time\nfoo()
would have been transformed to
get_ipython().run_cell_magic('time', '', 'foo()\\n')
and we look for instances of the latter.
"""
def __init__(self, cell_magic: Optional[CellMagic] = None) -> None:
self.cell_magic = cell_magic
def visit_Expr(self, node: ast.Expr) -> None:
"""Find cell magic, extract header and body."""
if (
isinstance(node.value, ast.Call)
and _is_ipython_magic(node.value.func)
and node.value.func.attr == "run_cell_magic"
):
args = _get_str_args(node.value.args)
self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2])
self.generic_visit(node)
@dataclasses.dataclass(frozen=True)
class OffsetAndMagic:
col_offset: int
magic: str
# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here
# as mypyc will generate broken code.
class MagicFinder(ast.NodeVisitor):
"""Visit cell to look for get_ipython calls.
Note that the source of the abstract syntax tree
will already have been processed by IPython's
TransformerManager().transform_cell.
For example,
%matplotlib inline
would have been transformed to
get_ipython().run_line_magic('matplotlib', 'inline')
and we look for instances of the latter (and likewise for other
types of magics).
"""
def __init__(self) -> None:
self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list)
def visit_Assign(self, node: ast.Assign) -> None:
"""Look for system assign magics.
For example,
black_version = !black --version
env = %env var
would have been (respectively) transformed to
black_version = get_ipython().getoutput('black --version')
env = get_ipython().run_line_magic('env', 'var')
and we look for instances of any of the latter.
"""
if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
args = _get_str_args(node.value.args)
if node.value.func.attr == "getoutput":
src = f"!{args[0]}"
elif node.value.func.attr == "run_line_magic":
src = f"%{args[0]}"
if args[1]:
src += f" {args[1]}"
else:
raise AssertionError(
f"Unexpected IPython magic {node.value.func.attr!r} found. "
"Please report a bug on https://github.com/psf/black/issues."
) from None
self.magics[node.value.lineno].append(
OffsetAndMagic(node.value.col_offset, src)
)
self.generic_visit(node)
def visit_Expr(self, node: ast.Expr) -> None:
"""Look for magics in body of cell.
For example,
!ls
!!ls
?ls
??ls
would (respectively) get transformed to
get_ipython().system('ls')
get_ipython().getoutput('ls')
get_ipython().run_line_magic('pinfo', 'ls')
get_ipython().run_line_magic('pinfo2', 'ls')
and we look for instances of any of the latter.
"""
if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
args = _get_str_args(node.value.args)
if node.value.func.attr == "run_line_magic":
if args[0] == "pinfo":
src = f"?{args[1]}"
elif args[0] == "pinfo2":
src = f"??{args[1]}"
else:
src = f"%{args[0]}"
if args[1]:
src += f" {args[1]}"
elif node.value.func.attr == "system":
src = f"!{args[0]}"
elif node.value.func.attr == "getoutput":
src = f"!!{args[0]}"
else:
raise NothingChanged # unsupported magic.
self.magics[node.value.lineno].append(
OffsetAndMagic(node.value.col_offset, src)
)
self.generic_visit(node)
|
psf/black
|
src/black/handle_ipynb_magics.py
|
Python
|
mit
| 13,534
|
[
"VisIt"
] |
b38d02a3cf5ea529c7573f4c7dd96866a58299488a294ec1ba66bd8fe139cacb
|
##########################################################################
#
# This file is part of OCEMR.
#
# OCEMR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OCEMR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OCEMR. If not, see <http://www.gnu.org/licenses/>.
#
#
#########################################################################
# Copyright 2011-8 Philip Freeman <elektron@halo.nu>
##########################################################################
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest
from django.db.models import Q
def get_visit_menu(current,patient):
menu = [
{ 'link': 'past', 'ord':1, 'title': 'Past Visits', 'active': False },
{ 'link': 'subj', 'ord':2, 'title': 'Reason for Visit', 'active': False },
{ 'link': 'obje', 'ord':3, 'title': 'Vitals/Exam', 'active': False },
{ 'link': 'labs', 'ord':4, 'title': 'Labs', 'active': False },
{ 'link': 'plan', 'ord':5, 'title': 'Assessment/Plan', 'active': False },
{ 'link': 'meds', 'ord':6, 'title': 'Meds', 'active': False },
{ 'link': 'refe', 'ord':8, 'title': 'Referrals', 'active': False },
{ 'link': 'note', 'ord':10, 'title': 'Notes', 'active': False, 'hilite': False },
]
if patient.gender == 'F':
menu.append({ 'link': 'preg', 'ord':9, 'title': 'Pregnancy History', 'active': False })
for i in range(0,len(menu)):
if menu[i]['link']==current:
menu[i]['active']=True
if patient.scratchNote:
for i in range(0,len(menu)):
if menu[i]['link']=='note':
menu[i]['hilite']=True
return menu
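# Hedged sketch (demo helper, not part of OCEMR): get_visit_menu marks the
# current tab active and adds the Pregnancy History tab for female patients.
def _example_get_visit_menu():
    class _FakePatient(object):
        gender = 'F'
        scratchNote = ''
    menu = get_visit_menu('labs', _FakePatient())
    assert any(entry['link'] == 'preg' for entry in menu)
    assert [entry for entry in menu if entry['link'] == 'labs'][0]['active']
    return menu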
@login_required
def visit(request,id):
"""
"""
return HttpResponseRedirect('/visit/%s/past/'%(id))
@login_required
def visit_claim(request,id):
"""
"""
from ocemr.models import Visit, Diagnosis
v = Visit.objects.get(pk=id)
v.status = 'INPR'
from datetime import datetime
v.claimedBy = request.user
v.claimedDateTime = datetime.now()
v.save()
p = v.patient
try:
old_v = Visit.objects.filter(patient=p).filter(status='RESO').order_by('-finishedDateTime')[0]
except IndexError: # no previously resolved visit for this patient
old_v = None
if old_v != None:
old_diags = Diagnosis.objects.filter(visit=old_v).exclude(status='RES')
for old_diag in old_diags:
try: # Check for existing Diagnosis in this record
d = Diagnosis.objects.get(type=old_diag.type, patient=p, visit=v)
except Diagnosis.DoesNotExist:
d = None
if d is None: # Create the record.
d, is_new = Diagnosis.objects.get_or_create(type=old_diag.type, patient=p, visit=v, diagnosedBy=request.user)
if is_new:
d.save()
return render(request, 'close_window.html', {})
@login_required
def visit_unclaim(request,id):
"""
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
v.status = 'WAIT'
v.save()
return render(request, 'close_window.html', {})
@login_required
def visit_finish(request,id):
"""
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
if v.status == 'INPR':
v.status = 'CHOT'
else:
v.status = 'MISS'
from datetime import datetime
v.finishedBy = request.user
v.finishedDateTime = datetime.now()
v.save()
return render(request, 'close_window.html', {})
@login_required
def visit_unfinish(request,id):
"""
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
if v.status == 'MISS':
from datetime import datetime
v.seenDateTime = datetime.now()
v.status = 'WAIT'
else:
v.status = 'INPR'
v.save()
return render(request, 'close_window.html', {})
@login_required
def visit_seen(request,id):
"""
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
if v.status == 'SCHE':
v.status = 'WAIT'
from datetime import datetime
v.seenDateTime = datetime.now()
v.save()
return render(request, 'close_window.html', {})
@login_required
def visit_unseen(request,id):
"""
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
if v.status == 'WAIT':
v.status = 'SCHE'
v.save()
return render(request, 'close_window.html', {})
@login_required
def visit_past(request,id):
"""
Visit
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
p = v.patient
menu = get_visit_menu('past', p)
return render(request, 'visit_past.html', locals())
@login_required
def visit_subj(request,id):
"""
Visit
"""
from ocemr.models import Visit, SymptomType, VisitSymptom
v = Visit.objects.get(pk=id)
p = v.patient
menu = get_visit_menu('subj',p)
symptomTypes = SymptomType.objects.all()
symptoms = VisitSymptom.objects.filter(visit=v)
return render(request, 'visit_subj.html', locals())
@login_required
def visit_subj_new(request,id, symptomtypeid):
"""
"""
from ocemr.models import SymptomType
from ocemr.forms import NewVisitSymptomForm
vid=int(id)
stid=int(symptomtypeid)
st=SymptomType.objects.get(pk=stid)
if request.method == 'POST': # If the form has been submitted...
form = NewVisitSymptomForm(vid, stid, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
o = form.save()
return HttpResponseRedirect('/close_window/')
else:
form = NewVisitSymptomForm(vid, stid) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Add a Symptom: %s'%(st.title),
'form_action': '/visit/%d/subj/new/%d/'%(vid,stid),
'form': form,
})
@login_required
def visit_subj_edit(request,id, visitsymptomid):
"""
"""
from ocemr.models import VisitSymptom
from ocemr.forms import EditVisitSymptomForm
vs = VisitSymptom.objects.get(pk=visitsymptomid)
if request.method == 'POST':
form = EditVisitSymptomForm(request.POST)
if form.is_valid():
vs.notes = form.cleaned_data['notes']
vs.save()
return HttpResponseRedirect('/close_window/')
else:
form = EditVisitSymptomForm(initial={'notes': vs.notes})
return render(request, 'popup_form.html', {
'title': 'Edit Symptom Notes: %s'%(vs.type.title),
'form_action': '/visit/%s/subj/edit/%s/'%(id,visitsymptomid),
'form': form,
})
@login_required
def visit_subj_delete(request,id, visitsymptomid):
"""
"""
from ocemr.models import VisitSymptom
o = VisitSymptom.objects.get(pk=visitsymptomid)
from ocemr.forms import ConfirmDeleteForm
if request.method == 'POST':
form = ConfirmDeleteForm(request.POST)
if form.is_valid():
if form.cleaned_data['doDelete']:
o.delete()
return HttpResponseRedirect('/close_window/')
else:
form = ConfirmDeleteForm()
return render(request, 'popup_form.html', {
'title': 'Delete Symptom: %s'%(o.type.title),
'form_action': '/visit/%s/subj/delete/%s/'%(id,visitsymptomid),
'form': form,
})
@login_required
def visit_obje(request,id):
"""
Visit
"""
from ocemr.models import Visit, VitalType, Vital
from ocemr.models import ExamNoteType, ExamNote
v = Visit.objects.get(pk=id)
p = v.patient
menu = get_visit_menu('obje',p)
vitalTypes = VitalType.objects.all()
vital_times_in = Vital.objects.filter(visit=v).values('observedDateTime').distinct()
vital_times = []
for vt in vital_times_in:
vitals = Vital.objects.filter(visit=v, observedDateTime=vt['observedDateTime'])
vital_times.append( [ vt['observedDateTime'], vitals ] )
#vitals = Vital.objects.filter(visit=v)
examNoteTypes = ExamNoteType.objects.all()
examNotes = ExamNote.objects.filter(visit=v)
return render(request, 'visit_obje.html', locals())
@login_required
def visit_obje_vitals_new(request,id):
"""
"""
from ocemr.models import Vital, VitalType, Visit
from ocemr.forms import NewVitalsForm
vid=int(id)
v=Visit.objects.get(pk=vid)
if request.method == 'POST': # If the form has been submitted...
form = NewVitalsForm(v, request.user, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
#form.cleaned_data['']
p = form.cleaned_data['patient']
vis = form.cleaned_data['visit']
dt = form.cleaned_data['observedDateTime']
u = form.cleaned_data['observedBy']
# Temp
data = form.cleaned_data['temp_in']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='Temp')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
#
data = form.cleaned_data['bloodPressureSystolic']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='BP - Systolic')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
#
data = form.cleaned_data['bloodPressureDiastolic']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='BP - Diastolic')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
#
data = form.cleaned_data['heartRate']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='HR')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
#
data = form.cleaned_data['respiratoryRate']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='RR')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
#
data = form.cleaned_data['height_in']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='Height')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
#
data = form.cleaned_data['weight_in']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='Weight')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
#SpO2
data = form.cleaned_data['spo2_in']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='SpO2')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
#Oxygen
data = form.cleaned_data['oxygen_in']
if not ( data == None or data == ''):
vt = VitalType.objects.get(title='Oxygen')
v = Vital(
type=vt,patient=p, visit=vis,
observedDateTime=dt, observedBy=u,
data=data)
v.save()
return HttpResponseRedirect('/close_window/')
else:
form = NewVitalsForm(v, request.user) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Add Vitals',
'form_action': '/visit/%d/obje/vitals/new/'%(vid),
'form': form,
})
@login_required
def visit_obje_vital_delete(request,id, oid):
"""
"""
from ocemr.models import Vital
o = Vital.objects.get(pk=oid)
from ocemr.forms import ConfirmDeleteForm
if request.method == 'POST':
form = ConfirmDeleteForm(request.POST)
if form.is_valid():
if form.cleaned_data['doDelete']:
o.delete()
return HttpResponseRedirect('/close_window/')
else:
form = ConfirmDeleteForm()
return render(request, 'popup_form.html', {
'title': 'Delete Vital: %s'%(o),
'form_action': '/visit/%s/obje/vital/delete/%s/'%(id,oid),
'form': form,
})
@login_required
def visit_obje_examNote_new(request,id, examnotetypeid):
"""
"""
from ocemr.models import ExamNoteType, Visit
from ocemr.forms import NewExamNoteForm
vid=int(id)
entid=int(examnotetypeid)
v=Visit.objects.get(pk=vid)
ent=ExamNoteType.objects.get(pk=entid)
if request.method == 'POST': # If the form has been submitted...
form = NewExamNoteForm(v, ent, request.user, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
o = form.save()
return HttpResponseRedirect('/close_window/')
else:
form = NewExamNoteForm(v, ent, request.user) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Add an Exam Note: %s'%(ent.title),
'form_action': '/visit/%d/obje/examNote/new/%d/'%(vid,entid),
'form': form,
})
@login_required
def visit_obje_examNote_edit(request,id, examnoteid):
"""
"""
from ocemr.models import ExamNote
from ocemr.forms import EditExamNoteForm
en = ExamNote.objects.get(pk=examnoteid)
if request.method == 'POST':
form = EditExamNoteForm(request.POST)
if form.is_valid():
en.note = form.cleaned_data['note']
en.save()
return HttpResponseRedirect('/close_window/')
else:
form = EditExamNoteForm(initial={'note': en.note})
return render(request, 'popup_form.html', {
'title': 'Edit Exam Note: %s'%(en.type.title),
'form_action': '/visit/%s/obje/examNote/edit/%s/'%(id,examnoteid),
'form': form,
})
@login_required
def visit_labs(request,id):
"""
Visit
"""
from ocemr.models import Visit, LabType, Lab
v = Visit.objects.get(pk=id)
p = v.patient
menu = get_visit_menu('labs',p)
labTypes = LabType.objects.all()
labs = Lab.objects.filter(visit=v)
return render(request, 'visit_labs.html', locals())
@login_required
def visit_labs_new(request,id, labtypeid):
"""
"""
from ocemr.models import LabType, Visit, Lab
vid=int(id)
ltid=int(labtypeid)
v = Visit.objects.get(pk=vid)
lt=LabType.objects.get(pk=ltid)
l = Lab(type=lt, patient=v.patient, visit=v, orderedBy=request.user, status='ORD')
l.save()
return HttpResponseRedirect('/close_window/')
@login_required
def visit_plan(request,id):
"""
Visit
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
p = v.patient
menu = get_visit_menu('plan',p)
return render(request, 'visit_plan.html', locals())
@login_required
def visit_plan_diag_new(request,id):
"""
"""
from ocemr.models import Visit
from ocemr.forms import NewDiagnosisForm
v = Visit.objects.get(pk=id)
p = v.patient
if request.method == 'POST':
form = NewDiagnosisForm(v, request.user, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
o = form.save()
return HttpResponseRedirect('/close_window/')
else:
form = NewDiagnosisForm(v, request.user) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Add a Diagnosis for %s'%(p),
'form_action': '/visit/%d/plan/diag/new/'%(v.id),
'form': form,
})
@login_required
def visit_plan_diag_new_bytype(request, id, dtid):
from ocemr.models import Visit, DiagnosisType, Diagnosis
v = Visit.objects.get(pk=id)
p = v.patient
dt = DiagnosisType.objects.get(pk=dtid)
d = Diagnosis(type=dt, patient=p, visit=v, status='NEW')
d.save()
return HttpResponseRedirect('/close_window/')
@login_required
def visit_meds(request,id):
"""
Visit
"""
from ocemr.models import Visit, Diagnosis
v = Visit.objects.get(pk=id)
p = v.patient
menu = get_visit_menu('meds',p)
q_status = Q( status='NEW' ) | Q( status='FOL' )
diagnoses = Diagnosis.objects.filter(visit=v).filter(q_status)
return render(request, 'visit_meds.html', locals())
@login_required
def visit_meds_new(request,id,did):
"""
"""
from ocemr.models import Diagnosis
from ocemr.forms import NewMedForm
did = int(did)
d = Diagnosis.objects.get(pk=did)
if request.method == 'POST':
form = NewMedForm(d, request.user, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
o = form.save()
return HttpResponseRedirect('/close_window/')
else:
form = NewMedForm(d, request.user) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Add a Med for %s - %s'%(d.patient,d.type.title),
'form_action': '/visit/%d/meds/new/%d/'%(d.visit.id,did),
'form': form,
})
@login_required
def visit_refe(request,id):
"""
Visit
"""
from ocemr.models import Visit, Referral
v = Visit.objects.get(pk=id)
p = v.patient
menu = get_visit_menu('refe',p)
referrals = Referral.objects.filter(patient=p).order_by('-addedDateTime')
return render(request, 'visit_refe.html', locals())
@login_required
def visit_refe_new(request,id):
"""
"""
from ocemr.models import Visit, Referral
from ocemr.forms import NewReferralForm
vid=int(id)
v=Visit.objects.get(pk=vid)
if request.method == 'POST': # If the form has been submitted...
form = NewReferralForm(v, request.user, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
o = form.save()
return HttpResponseRedirect('/close_window/')
else:
form = NewReferralForm(v, request.user) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Add a Referral',
'form_action': '/visit/%d/refe/new/'%(vid),
'form': form,
})
@login_required
def visit_refe_edit(request,id, refid):
"""
"""
from ocemr.models import Referral
from ocemr.forms import EditReferralForm
r = Referral.objects.get(pk=refid)
if request.method == 'POST':
form = EditReferralForm(request.POST)
if form.is_valid():
r.to = form.cleaned_data['to']
r.reason = form.cleaned_data['reason']
r.save()
return HttpResponseRedirect('/close_window/')
else:
form = EditReferralForm(initial={'to':r.to, 'reason': r.reason})
return render(request, 'popup_form.html', {
'title': 'Edit Referral: %s'%(r),
'form_action': '/visit/%s/refe/edit/%s/'%(id,refid),
'form': form,
})
@login_required
def visit_note(request,id):
"""
Visit
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
p = v.patient
menu = get_visit_menu('note',p)
return render(request, 'visit_note.html', locals())
@login_required
def visit_allergy_new(request,id):
"""
"""
from ocemr.models import Visit, Allergy
from ocemr.forms import NewAllergyForm
vid=int(id)
v=Visit.objects.get(pk=vid)
if request.method == 'POST': # If the form has been submitted...
form = NewAllergyForm(v, request.user, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
o = form.save()
return HttpResponseRedirect('/close_window/')
else:
form = NewAllergyForm(v, request.user) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Add an Allergy',
'form_action': '/visit/%d/allergy/new/'%(vid),
'form': form,
})
@login_required
def visit_allergy_delete(request,id, oid):
"""
"""
from ocemr.models import Allergy
o = Allergy.objects.get(pk=oid)
from ocemr.forms import ConfirmDeleteForm
if request.method == 'POST':
form = ConfirmDeleteForm(request.POST)
if form.is_valid():
if form.cleaned_data['doDelete']:
o.delete()
return HttpResponseRedirect('/close_window/')
else:
form = ConfirmDeleteForm()
return render(request, 'popup_form.html', {
'title': 'Delete Allergy: %s'%(o.to),
'form_action': '/visit/%s/allergy/delete/%s/'%(id,oid),
'form': form,
})
@login_required
def visit_collect(request,id):
"""
"""
from ocemr.models import Visit
from ocemr.forms import NewCashLogForm
vid=int(id)
v=Visit.objects.get(pk=vid)
if request.method == 'POST': # If the form has been submitted...
form = NewCashLogForm(v, request.user, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
o = form.save()
return HttpResponseRedirect('/close_window/')
else:
form = NewCashLogForm(v, request.user) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Collect',
'form_action': '/visit/%d/collect/'%(vid),
'form': form,
})
@login_required
def visit_cost_estimate_detail(request,id):
from ocemr.models import Visit
vid=int(id)
v=Visit.objects.get(pk=vid)
table="<TR><TH>Title<TH>Base Cost<TH>Quantity<TH>Total</TR>"
ced = v.get_estimated_visit_cost_detail()
for row in ced:
table+="<TR><TD>%s<TD>%s<TD>%s<TD>%s</TR>"%(
row[0], row[1], row[2], row[3] )
table+="<TR><TD COLSPAN=3>TOTAL<TD>%s</TR>"%(
v.get_estimated_visit_cost() )
return render(request, 'popup_table.html', {
'title': 'Estimated Visit Cost Detail',
'table': table,
})
@login_required
def visit_bill_amount(request,id):
"""
"""
from ocemr.models import Visit
from ocemr.forms import EditBillAmountForm
vid=int(id)
v=Visit.objects.get(pk=vid)
if request.method == 'POST': # If the form has been submitted...
form = EditBillAmountForm(v.cost, request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
v.cost=form.cleaned_data['amount']
v.save()
return HttpResponseRedirect('/close_window/')
else:
form = EditBillAmountForm(v.cost) # An unbound form
return render(request, 'popup_form.html', {
'title': 'Edit Bill Amount',
'form_action': '/visit/%d/bill_amount/'%(vid),
'form': form,
})
@login_required
def visit_resolve(request,id):
"""
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
if v.status == 'CHOT':
v.status = 'RESO'
from datetime import datetime
v.resolvedBy = request.user
v.resolvedDateTime = datetime.now()
v.save()
return render(request, 'close_window.html', {})
@login_required
def visit_unresolve(request,id):
"""
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
if v.status == 'RESO':
v.status = 'CHOT'
v.save()
return render(request, 'close_window.html', {})
@login_required
def visit_record(request, id, type):
"""
['enscript', '-P', 'p1102w', '--header=Engeye Health Clinic', '--footer=Page $% of $=', '--word-wrap', '--mark-wrapped-lines=arrow', '/etc/motd']
"""
from ocemr.models import Visit
v = Visit.objects.get(pk=id)
# TODO: Make this header Configurable!
head_text = """\t\t\t\tEngeye Health Clinic - Ddegeya-Masaka
\t\t\t\tP.O. Box 26592, Kampala\t\t0772-556105\t\twww.engeye.org
\t\tBino bye bikwata ku kujjanjabibwa kwo funnye leero
"""
if v.finishedDateTime:
d = v.finishedDateTime
else:
d = v.seenDateTime
head_text += "\tPatient: %s\tVisit# %05d\tDate: %02d-%02d-%02d\n"%(
v.patient,v.id,
d.day, d.month, d.year)
allergy_list = []
for a in v.patient.get_allergies():
allergy_list.append(a.to)
if len(allergy_list) > 0:
head_text += "\t\tAllergies: %s\n"%(", ".join(allergy_list))
summ_text = v.get_summary_text()
next_visits = Visit.objects.filter(patient=v.patient,scheduledDate__gt=v.scheduledDate)
upco_text=""
if len(next_visits) > 0:
upco_text = "\n\n\t\tKomawo Kudwaliro nga:\n"
for uv in next_visits:
upco_text += "\t%02d-%02d-%02d - %s:%s\n"%(
uv.scheduledDate.day,
uv.scheduledDate.month,
uv.scheduledDate.year,
uv.displayReason,
uv.reasonDetail,
)
text_out = head_text + summ_text + upco_text
if type == "print":
from subprocess import Popen, PIPE
from ocemr.settings import PRINTER_NAME, PAPER_SIZE
cmd = " ".join( ('enscript', '-P', PRINTER_NAME, '--word-wrap', '--mark-wrapped-lines=arrow', '--font=Times-Roman12', '--header=', '--media='+PAPER_SIZE) )
p = Popen(cmd, shell=True, bufsize=0,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
child_stdin.write(text_out.encode( "utf-8" ))
out,err=p.communicate()
return render(request, 'close_window.html', {})
else:
lines = text_out.replace('\t',' ').replace('\n','<BR>')
return render(request, 'popup_lines.html', {'lines': lines, 'link_text': """<a href="#" onclick="window.print();return false;">Print</a>"""})
|
ph1l/ocemr
|
ocemr/views/visit.py
|
Python
|
gpl-3.0
| 23,918
|
[
"VisIt"
] |
e9f6f5fa66f7a614ffc98a67d40e972c0cb6a1abe670c08bd318d5039b94c4b5
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import BaseHTTPServer
import ConfigParser
import cookielib
import errno
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import webbrowser
from multiprocessing.pool import ThreadPool
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
LOGGER = logging.getLogger('upload')
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"
VCS = [
{'name': VCS_MERCURIAL,
'aliases': ['hg', 'mercurial']},
{'name': VCS_SUBVERSION,
'aliases': ['svn', 'subversion'],},
{'name': VCS_PERFORCE,
'aliases': ['p4', 'perforce']},
{'name': VCS_GIT,
'aliases': ['git']},
{'name': VCS_CVS,
'aliases': ['cvs']},
]
VCS_SHORT_NAMES = [] # hg, svn, ...
VCS_ABBREVIATIONS = {} # alias: name, ...
for vcs_entry in VCS:
VCS_SHORT_NAMES.append(min(vcs_entry['aliases'], key=len))
VCS_ABBREVIATIONS.update((alias, vcs_entry['name'])
for alias in vcs_entry['aliases'])
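# Hedged illustration (added for clarity, not part of the original tool):
# the loop above yields the shortest alias per system, in VCS order, and
# maps every alias to its canonical name.
assert VCS_SHORT_NAMES == ['hg', 'svn', 'p4', 'git', 'cvs']
assert VCS_ABBREVIATIONS['mercurial'] == VCS_ABBREVIATIONS['hg'] == 'Mercurial'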
# OAuth 2.0-Related Constants
LOCALHOST_IP = '127.0.0.1'
DEFAULT_OAUTH2_PORT = 8001
ACCESS_TOKEN_PARAM = 'access_token'
ERROR_PARAM = 'error'
OAUTH_DEFAULT_ERROR_MESSAGE = 'OAuth 2.0 error occurred.'
OAUTH_PATH = '/get-access-token'
OAUTH_PATH_PORT_TEMPLATE = OAUTH_PATH + '?port=%(port)d'
AUTH_HANDLER_RESPONSE = """\
<html>
<head>
<title>Authentication Status</title>
<script>
window.onload = function() {
window.close();
}
</script>
</head>
<body>
<p>The authentication flow has completed.</p>
</body>
</html>
"""
# Borrowed from google-api-python-client
OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Your browser has been opened to visit:
%s
If your browser is on a different machine then exit and re-run
upload.py with the command-line parameter
--no_oauth2_webbrowser
"""
NO_OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Go to the following link in your browser:
%s
and copy the access token.
"""
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >> sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self._reason = args["Error"]
self.info = args.get("Info", None)
@property
def reason(self):
# reason is a property on python 2.7 but a member variable on <=2.6.
# self.args is modified so it cannot be used as-is so save the value in
# self._reason.
return self._reason
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None,
extra_headers=None, save_cookies=False,
account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new AbstractRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers or {}
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
LOGGER.info("Server: %s; Host: %s", self.host, self.host_override)
else:
LOGGER.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
LOGGER.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data, headers={"Accept": "text/plain"})
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for _ in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
print >> sys.stderr, ''
if e.reason == "BadAuthentication":
if e.info == "InvalidSecondFactor":
print >> sys.stderr, (
"Use an application-specific password instead "
"of your regular account password.\n"
"See http://www.google.com/"
"support/accounts/bin/answer.py?answer=185833")
else:
print >> sys.stderr, "Invalid username or password."
elif e.reason == "CaptchaRequired":
print >> sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
elif e.reason == "NotVerified":
print >> sys.stderr, "Account not verified."
elif e.reason == "TermsNotAgreed":
print >> sys.stderr, "User has not agreed to TOS."
elif e.reason == "AccountDeleted":
print >> sys.stderr, "The user account has been deleted."
elif e.reason == "AccountDisabled":
print >> sys.stderr, "The user account has been disabled."
break
elif e.reason == "ServiceDisabled":
print >> sys.stderr, ("The user's access to the service has been "
"disabled.")
elif e.reason == "ServiceUnavailable":
print >> sys.stderr, "The service is not available; try again later."
else:
# Unknown error.
raise
print >> sys.stderr, ''
continue
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
extra_headers: Dict containing additional HTTP headers that should be
included in the request (string header names mapped to their values),
or None to not include any additional headers.
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated and self.auth_function:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
if extra_headers:
for header, value in extra_headers.items():
req.add_header(header, value)
try:
# The per-request timeout (70s) takes precedence over the socket default
# set above and guards against a single request hanging indefinitely.
f = self.opener.open(req, timeout=70)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
if not self.auth_function:
raise
self._Authenticate()
elif e.code == 301:
# Handle permanent redirect manually.
url = e.info()["location"]
url_loc = urlparse.urlparse(url)
self.host = '%s://%s' % (url_loc[0], url_loc[1])
elif e.code >= 500:
# TODO: We should error out on a 500, but the server is too flaky
# for that at the moment.
StatusUpdate('Upload got a 500 response: %d' % e.code)
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
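# Illustrative (hypothetical) call sites for Send; the paths shown here are
# examples only, not a catalogue of the server's API:
#
#   body = rpc_server.Send("/api/issue_status", issue="123")
#   rpc_server.Send("/1234/upload_content/1/5", payload=body,
#                   content_type="application/octet-stream")
#
# Keyword arguments become query-string parameters, so the first call
# requests "/api/issue_status?issue=123".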
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
if isinstance(self.auth_function, OAuth2Creds):
access_token = self.auth_function()
if access_token is not None:
self.extra_headers['Authorization'] = 'OAuth %s' % (access_token,)
self.authenticated = True
else:
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
class CondensedHelpFormatter(optparse.IndentedHelpFormatter):
"""Frees more horizontal space by removing indentation from group
options and collapsing arguments between short and long, e.g.
'-o ARG, --opt=ARG' to -o --opt ARG"""
def format_heading(self, heading):
return "%s:\n" % heading
def format_option(self, option):
self.dedent()
res = optparse.HelpFormatter.format_option(self, option)
self.indent()
return res
def format_option_strings(self, option):
self.set_long_opt_delimiter(" ")
optstr = optparse.HelpFormatter.format_option_strings(self, option)
optlist = optstr.split(", ")
if len(optlist) > 1:
if option.takes_value():
# strip METAVAR from all but the last option
optlist = [x.split()[0] for x in optlist[:-1]] + optlist[-1:]
optstr = " ".join(optlist)
return optstr
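# For example, with this formatter an option declared as
#   group.add_option("-s", "--server", metavar="SERVER", ...)
# renders in --help as "-s --server SERVER" rather than the default
# "-s SERVER, --server=SERVER", saving horizontal space.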
parser = optparse.OptionParser(
usage=("%prog [options] [-- diff_options] [path...]\n"
"See also: http://code.google.com/p/rietveld/wiki/UploadPyUsage"),
add_help_option=False,
formatter=CondensedHelpFormatter()
)
parser.add_option("-h", "--help", action="store_true",
help="Show this help message and exit.")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default=DEFAULT_REVIEW_SERVER,
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
group.add_option("--oauth2", action="store_true",
dest="use_oauth2", default=False,
help="Use OAuth 2.0 instead of a password.")
group.add_option("--oauth2_port", action="store", type="int",
dest="oauth2_port", default=DEFAULT_OAUTH2_PORT,
help=("Port to use to handle OAuth 2.0 redirect. Must be an "
"integer in the range 1024-49151, defaults to "
"'%default'."))
group.add_option("--no_oauth2_webbrowser", action="store_false",
dest="open_oauth2_local_webbrowser", default=True,
help="Don't open a browser window to get an access token.")
group.add_option("--account_type", action="store", dest="account_type",
metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
choices=["GOOGLE", "HOSTED"],
help=("Override the default account type "
"(defaults to '%default', "
"valid choices are 'GOOGLE' and 'HOSTED')."))
group.add_option("-j", "--number-parallel-uploads",
dest="num_upload_threads", default=8,
help="Number of uploads to do in parallel.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-t", "--title", action="store", dest="title",
help="New issue subject or new patch set title")
group.add_option("--project", action="store", dest="project",
help="The project the issue belongs to")
group.add_option("-m", "--message", action="store", dest="message",
default=None,
help="New issue description or new patch set message")
group.add_option("-F", "--file", action="store", dest="file",
default=None, help="Read the message above from file.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base URL path for files (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--target_ref", action="store", dest="target_ref",
default=None,
help="The target ref that is transitively tracked by the "
"local branch this patch comes from.")
parser.add_option("--cq_dry_run", action="store_true",
help="Send the patchset to do a CQ dry run right after "
"upload.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("-p", "--send_patch", action="store_true",
dest="send_patch", default=False,
help="Same as --send_mail, but include diff as an "
"attachment, and prepend email subject with 'PATCH:'.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Explicitly specify version control system (%s)"
% ", ".join(VCS_SHORT_NAMES)))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
# Git-specific
group = parser.add_option_group("Git-specific options")
group.add_option("--git_similarity", action="store", dest="git_similarity",
metavar="SIM", type="int", default=50,
help=("Set the minimum similarity percentage for detecting "
"renames and copies. See `git diff -C`. (default 50)."))
group.add_option("--git_only_search_patch", action="store_false", default=True,
dest='git_find_copies_harder',
help="Removes --find-copies-harder when seaching for copies")
group.add_option("--git_no_find_copies", action="store_false", default=True,
dest="git_find_copies",
help=("Prevents git from looking for copies (default off)."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
"(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
metavar="P4_PORT", default=None,
help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
metavar="P4_CHANGELIST", default=None,
help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
metavar="P4_CLIENT", default=None,
help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
metavar="P4_USER", default=None,
help=("Perforce user"))
# OAuth 2.0 Methods and Helpers
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters for an access token
or an error and then stops serving.
"""
access_token = None
error = None
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters into the server's
access_token or error and then stops serving.
"""
def SetResponseValue(self):
"""Stores the access token or error from the request on the server.
Will only do this if exactly one query parameter was passed in to the
request and that query parameter used 'access_token' or 'error' as the key.
"""
query_string = urlparse.urlparse(self.path).query
query_params = urlparse.parse_qs(query_string)
if len(query_params) == 1:
if ACCESS_TOKEN_PARAM in query_params:
access_token_list = query_params[ACCESS_TOKEN_PARAM]
if len(access_token_list) == 1:
self.server.access_token = access_token_list[0]
else:
error_list = query_params.get(ERROR_PARAM, [])
if len(error_list) == 1:
self.server.error = error_list[0]
def do_GET(self):
"""Handle a GET request.
Parses and saves the query parameters and prints a message that the server
has completed its lone task (handling a redirect).
Note that we can't detect if an error occurred.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.SetResponseValue()
self.wfile.write(AUTH_HANDLER_RESPONSE)
def log_message(self, format_str, *args):
"""Do not log messages to stdout while running as command line program."""
pass
def OpenOAuth2ConsentPage(server=DEFAULT_REVIEW_SERVER,
port=DEFAULT_OAUTH2_PORT):
"""Opens the OAuth 2.0 consent page or prints instructions how to.
Uses the webbrowser module to open the OAuth server side page in a browser.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
A boolean indicating whether the page opened successfully.
"""
path = OAUTH_PATH_PORT_TEMPLATE % {'port': port}
parsed_url = urlparse.urlparse(server)
scheme = parsed_url[0] or 'https'
if scheme != 'https':
ErrorExit('Using OAuth requires a review server with SSL enabled.')
# If no scheme was given on the command line, the server address ends up
# in parsed_url.path; otherwise it is in netloc.
host = parsed_url[1] or parsed_url[2]
page = '%s://%s%s' % (scheme, host, path)
page_opened = webbrowser.open(page, new=1, autoraise=True)
if page_opened:
print OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
return page_opened
def WaitForAccessToken(port=DEFAULT_OAUTH2_PORT):
"""Spins up a simple HTTP Server to handle a single request.
Intended to handle a single redirect from the production server after the
user authenticated via OAuth 2.0 with the server.
Args:
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
The access token passed to the localhost server, or None if no access token
was passed.
"""
httpd = ClientRedirectServer((LOCALHOST_IP, port), ClientRedirectHandler)
# Wait to serve just one request before deferring control back
# to the caller of WaitForAccessToken.
httpd.handle_request()
if httpd.access_token is None:
ErrorExit(httpd.error or OAUTH_DEFAULT_ERROR_MESSAGE)
return httpd.access_token
def GetAccessToken(server=DEFAULT_REVIEW_SERVER, port=DEFAULT_OAUTH2_PORT,
open_local_webbrowser=True):
"""Gets an Access Token for the current user.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_local_webbrowser: Boolean, defaults to True. If set, opens a page in
the user's browser.
Returns:
A string access token that was sent to the local server. If the serving page
via WaitForAccessToken does not receive an access token, this method
returns None.
"""
access_token = None
if open_local_webbrowser:
page_opened = OpenOAuth2ConsentPage(server=server, port=port)
if page_opened:
try:
access_token = WaitForAccessToken(port=port)
except socket.error, e:
print 'Can\'t start local webserver. Socket Error: %s\n' % (e.strerror,)
if access_token is None:
# TODO(dhermes): Offer to add to clipboard using xsel, xclip, pbcopy, etc.
page = 'https://%s%s' % (server, OAUTH_PATH)
print NO_OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
access_token = raw_input('Enter access token: ').strip()
return access_token
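# Sketch of the OAuth 2.0 round trip implemented above (the exact consent
# path comes from OAUTH_PATH_PORT_TEMPLATE, defined earlier in this file):
#
#   1. OpenOAuth2ConsentPage() opens the server-side consent page in a
#      browser, passing the local redirect port as a query parameter.
#   2. After the user consents, the server redirects the browser to
#      http://localhost:<port>/?access_token=<token>.
#   3. WaitForAccessToken() serves that single redirect and returns the
#      token; GetAccessToken() falls back to a manual prompt on failure.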
class KeyringCreds(object):
def __init__(self, server, host, email):
self.server = server
# Explicitly cast host to str to work around bug in old versions of Keyring
# (versions before 0.10). Even though newer versions of Keyring fix this,
# some modern linuxes (such as Ubuntu 12.04) still bundle a version with
# the bug.
self.host = str(host)
self.email = email
self.accounts_seen = set()
def GetUserCredentials(self):
"""Prompts the user for a username and password.
Only use keyring on the initial call. If the keyring contains the wrong
password, we want to give the user a chance to enter another one.
"""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
global keyring
email = self.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % self.server)
password = None
if keyring and email not in self.accounts_seen:
try:
password = keyring.get_password(self.host, email)
except Exception:
# Sadly, we have to trap all errors here as
# gnomekeyring.IOError inherits from object. :/
print "Failed to get password from keyring"
keyring = None
if password is not None:
print "Using password from system keyring."
self.accounts_seen.add(email)
else:
password = getpass.getpass("Password for %s: " % email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(self.host, email, password)
self.accounts_seen.add(email)
return (email, password)
class OAuth2Creds(object):
"""Simple object to hold server and port to be passed to GetAccessToken."""
def __init__(self, server, port, open_local_webbrowser=True):
self.server = server
self.port = port
self.open_local_webbrowser = open_local_webbrowser
def __call__(self):
"""Uses stored server and port to retrieve OAuth 2.0 access token."""
return GetAccessToken(server=self.server, port=self.port,
open_local_webbrowser=self.open_local_webbrowser)
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE, use_oauth2=False,
oauth2_port=DEFAULT_OAUTH2_PORT,
open_oauth2_local_webbrowser=True):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
use_oauth2: Boolean indicating whether OAuth 2.0 should be used for
authentication.
oauth2_port: Integer, the port where the localhost server receiving the
redirect is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_oauth2_local_webbrowser: Boolean, defaults to True. If True and using
OAuth, this opens a page in the user's browser to obtain a token.
Returns:
A new HttpRpcServer, on which RPC calls can be made.
"""
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "test@example.com"
LOGGER.info("Using debug user %s. Override with --email" % email)
server = HttpRpcServer(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
positional_args = [server]
if use_oauth2:
positional_args.append(
OAuth2Creds(server, oauth2_port, open_oauth2_local_webbrowser))
else:
positional_args.append(KeyringCreds(server, host, email).GetUserCredentials)
return HttpRpcServer(*positional_args,
host_override=host_override,
save_cookies=save_cookies,
account_type=account_type)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-%s-' % sum(hash(f) for f in files)
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
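# For illustration, EncodeMultipartFormData([("subject", "hi")],
# [("data", "a.txt", "hello\n")]) produces a body shaped like:
#
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="subject"
#
#   hi
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="data"; filename="a.txt"
#   Content-Type: text/plain
#
#   hello
#   --<BOUNDARY>--
#
# with CRLF line endings, alongside the matching Content-Type header.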
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCodeAndStderr(command, print_output=False,
universal_newlines=True,
env=None):
"""Run a command and return output from stdout, stderr and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
env: environment variable dictionary (default: os.environ).
Returns:
Tuple (stdout, stderr, return code)
"""
LOGGER.info("Running %s", command)
if env is None:
env = os.environ
env = env.copy()
env['LC_MESSAGES'] = 'C'
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >> sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, errout, p.returncode
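# Example call (hypothetical command); the helpers below build on this one:
#
#   out, err, code = RunShellWithReturnCodeAndStderr(["svn", "info"])
#
# LC_MESSAGES is forced to "C" above so that output parsed elsewhere in
# this file (e.g. _GetInfo) is not localized.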
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=None):
"""Run a command and return output from stdout and the return code."""
if env is None:
env = os.environ
out, _, retcode = RunShellWithReturnCodeAndStderr(command, print_output,
universal_newlines, env)
return out, retcode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=None):
if env is None:
env = os.environ
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GetGUID(self):
"""Return string to distinguish the repository from others, for example to
query all opened review issues for it"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
_, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
base_or_cur = "base"
else:
base_or_cur = "current"
if len(content) > MAX_UPLOAD_SIZE:
result = ("Not uploading the %s file for %s because it's too large." %
(base_or_cur, filename))
file_too_large = True
content = ""
elif options.verbose:
result = "Uploading %s file for %s" % (base_or_cur, filename)
checksum = md5(content).hexdigest()
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
try:
response_body = rpc_server.Send(url, body, content_type=ctype)
except urllib2.HTTPError, e:
response_body = ("Failed to upload file for %s. Got %d status code." %
(filename, e.code))
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
return result
patches = dict()
for k, v in patch_list:
patches.setdefault(v, k)
threads = []
thread_pool = ThreadPool(options.num_upload_threads)
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content is not None:
t = thread_pool.apply_async(UploadFile, args=(filename,
file_id, base_content, is_binary, status, True))
threads.append(t)
if new_content is not None:
t = thread_pool.apply_async(UploadFile, args=(filename,
file_id, new_content, is_binary, status, False))
threads.append(t)
for t in threads:
# UploadFile returns None when there is nothing worth reporting.
message = t.get(timeout=60)
if message:
print message
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return (mimetype.startswith("image/") and
not mimetype.startswith("image/svg"))
def IsBinaryData(self, data):
"""Returns true if data contains a null byte."""
# Derived from Mercurial's binary-detection heuristic; see
# http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229
return bool(data and "\0" in data)
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GetGUID(self):
return self._GetInfo("Repository UUID")
# pylint: disable=unused-argument
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns base URL for current diff.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
url = self._GetInfo("URL")
if url:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
guess = ""
# TODO(anatoli) - repository specific hacks should be handled by server
if netloc == "svn.python.org" and scheme == "svn+ssh":
path = "projects" + path
scheme = "http"
guess = "Python "
elif netloc.endswith(".googlecode.com"):
scheme = "http"
guess = "Google Code "
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
LOGGER.info("Guessed %sbase = %s", guess, base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def _GetInfo(self, key):
"""Parses 'svn info' for current dir. Returns value for key or None"""
for line in RunShell(["svn", "info"]).splitlines():
if line.startswith(key + ": "):
return line.split(":", 1)[1].strip()
def _EscapeFilename(self, filename):
"""Escapes filename for SVN commands."""
if "@" in filename and not filename.endswith("@"):
filename = "%s@" % filename
return filename
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
LOGGER.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
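# For example, with keyword_str "Id", _CollapseKeywords turns an expanded
# keyword such as
#   $Id: file.py 1234 2010-01-01 author $
# back into "$Id$" (or "$Id:: ...$" with space padding for fixed-length
# keywords), so the base content matches what "svn diff" produced.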
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file_obj = open(filename, 'rb')
result = ""
try:
result = file_obj.read()
finally:
file_obj.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals",
self._EscapeFilename(filename)])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start,
self._EscapeFilename(dirname) or "."]
out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
if returncode:
# Directory might not yet exist at start revision.
# svn: Unable to find repository location for 'abc' in revision nnn
if re.match(r'^svn: Unable to find repository location '
r'for .+ in revision \d+', err):
old_files = ()
else:
ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
else:
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [self._EscapeFilename(dirname) or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type",
self._EscapeFilename(filename)], silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary:
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
# filename must not be escaped. We already append an at sign ('@') here.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
else:
mimetype = mimetype.strip()
get_base = False
# this test for binary is exactly the test prescribed by the
# official SVN docs at
# http://subversion.apache.org/faq.html#binary-files
is_binary = (bool(mimetype) and
not mimetype.startswith("text/") and
mimetype not in ("image/x-xbitmap", "image/x-xpixmap"))
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content, ret_code = RunShellWithReturnCode(
["svn", "cat", self._EscapeFilename(filename)],
universal_newlines=universal_newlines)
if ret_code and status[0] == "R":
# It's a replaced file without local history (see issue208).
# The base file needs to be fetched from the server.
url = "%s/%s" % (self.svn_base, filename)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif ret_code:
ErrorExit("Got error status from 'svn cat %s'" % filename)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def GetGUID(self):
revlist = RunShell("git rev-list --parents HEAD".split()).splitlines()
# M-A: Return the 1st root hash, there could be multiple when a
# subtree is merged. In that case, more analysis would need to
# be done to figure out which HEAD is the 'most representative'.
for r in revlist:
if ' ' not in r:
return r
def PostProcessDiff(self, gitdiff):
"""Converts the diff output to include an svn-style "Index:" line as well
as record the hashes of the files, so we can upload them along with our
diff."""
# Special used by git to indicate "no such content".
NULL_HASH = "0"*40
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
def AddSubversionPropertyChange(filename):
"""Add svn's property change information into the patch if given file is
new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
def GenerateDiff(self, extra_args):
extra_args = extra_args[:]
if self.options.revision:
if ":" in self.options.revision:
extra_args = self.options.revision.split(":", 1) + extra_args
else:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if "GIT_EXTERNAL_DIFF" in env:
del env["GIT_EXTERNAL_DIFF"]
# -M/-C will not print the diff for the deleted file when a file is renamed.
# This is confusing because the original file will not be shown on the
# review when a file is renamed. So, get a diff with ONLY deletes, then
# append a diff (with rename detection), without deletes.
cmd = [
"git", "diff", "--no-color", "--no-ext-diff", "--full-index",
"--ignore-submodules", "--src-prefix=a/", "--dst-prefix=b/",
]
diff = RunShell(
cmd + ["--no-renames", "--diff-filter=D"] + extra_args,
env=env, silent_ok=True)
assert 0 <= self.options.git_similarity <= 100
if self.options.git_find_copies:
similarity_options = ["-l100000", "-C%d%%" % self.options.git_similarity]
if self.options.git_find_copies_harder:
similarity_options.append("--find-copies-harder")
else:
similarity_options = ["-M%d%%" % self.options.git_similarity ]
diff += RunShell(
cmd + ["--diff-filter=AMCRT"] + similarity_options + extra_args,
env=env, silent_ok=True)
# The CL may consist solely of deletions, or contain none at all, so accept
# an empty diff from both commands and check for a fully empty diff manually.
if not diff:
ErrorExit("No output from %s" % (cmd + extra_args))
return diff
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=False)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(
["git", "show", "HEAD:" + filename], silent_ok=True,
universal_newlines=False)
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
# Grab the before/after content if we need it.
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before)
is_binary = self.IsImage(filename)
if base_content:
is_binary = is_binary or self.IsBinaryData(base_content)
# Only include the "after" file if it's an image; otherwise it
# it is reconstructed from the diff.
if hash_after:
new_content = self.GetFileContent(hash_after)
is_binary = is_binary or self.IsBinaryData(new_content)
if not is_binary:
new_content = None
return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for CVS."""
def __init__(self, options):
super(CVSVCS, self).__init__(options)
def GetGUID(self):
"""For now we don't know how to get repository ID for CVS"""
return
def GetOriginalContent_(self, filename):
RunShell(["cvs", "up", filename], silent_ok=True)
# TODO: need to detect the file content encoding.
content = open(filename).read()
return content.replace("\r\n", "\n")
def GetBaseFile(self, filename):
base_content = None
new_content = None
status = "A"
output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
if retcode:
ErrorExit("Got error status from 'cvs status %s'" % filename)
if output.find("Status: Locally Modified") != -1:
status = "M"
temp_filename = "%s.tmp123" % filename
os.rename(filename, temp_filename)
base_content = self.GetOriginalContent_(filename)
os.rename(temp_filename, filename)
elif output.find("Status: Locally Added"):
status = "A"
base_content = ""
elif output.find("Status: Needs Checkout"):
status = "D"
base_content = self.GetOriginalContent_(filename)
return (base_content, new_content, self.IsBinaryData(base_content), status)
def GenerateDiff(self, extra_args):
cmd = ["cvs", "diff", "-u", "-N"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(extra_args)
data, retcode = RunShellWithReturnCode(cmd)
count = 0
if retcode in [0, 1]:
for line in data.splitlines():
if line.startswith("Index:"):
count += 1
LOGGER.info(line)
if not count:
ErrorExit("No valid patches found in output from cvs diff")
return data
def GetUnknownFiles(self):
data, retcode = RunShellWithReturnCode(["cvs", "diff"])
if retcode not in [0, 1]:
ErrorExit("Got error status from 'cvs diff':\n%s" % (data,))
unknown_files = []
for line in data.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def GetGUID(self):
# See chapter "Uniquely identifying a repository"
# http://hgbook.red-bean.com/read/customizing-the-output-of-mercurial.html
info = RunShell("hg log -r0 --template {node}".split())
return info.strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
absname = os.path.join(self.repo_dir, filename)
return os.path.relpath(absname)
def GenerateDiff(self, extra_args):
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify line to make it look like as it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
LOGGER.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir,
# but "hg diff" has given us the path relative to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
status, _ = out[0].split(' ', 1)
if len(out) > 1 and status == "A":
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = self.IsBinaryData(base_content)
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or self.IsBinaryData(new_content)
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary:
new_content = None
return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Perforce."""
def __init__(self, options):
def ConfirmLogin():
# Make sure we have a valid perforce session
while True:
data, retcode = self.RunPerforceCommandWithReturnCode(
["login", "-s"], marshal_output=True)
if not data:
ErrorExit("Error checking perforce login")
if not retcode and (not "code" in data or data["code"] != "error"):
break
print "Enter perforce password: "
self.RunPerforceCommandWithReturnCode(["login"])
super(PerforceVCS, self).__init__(options)
self.p4_changelist = options.p4_changelist
if not self.p4_changelist:
ErrorExit("A changelist id is required")
if options.revision:
ErrorExit("--rev is not supported for perforce")
self.p4_port = options.p4_port
self.p4_client = options.p4_client
self.p4_user = options.p4_user
ConfirmLogin()
if not options.title:
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
if description and "desc" in description:
# Rietveld doesn't support multi-line descriptions
raw_title = description["desc"].strip()
lines = raw_title.splitlines()
if lines:
options.title = lines[0]
def GetGUID(self):
"""For now we don't know how to get repository ID for Perforce"""
return
def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
universal_newlines=True):
args = ["p4"]
if marshal_output:
# -G makes perforce format its output as marshalled python objects
args.extend(["-G"])
if self.p4_port:
args.extend(["-p", self.p4_port])
if self.p4_client:
args.extend(["-c", self.p4_client])
if self.p4_user:
args.extend(["-u", self.p4_user])
args.extend(extra_args)
data, retcode = RunShellWithReturnCode(
args, print_output=False, universal_newlines=universal_newlines)
if marshal_output and data:
data = marshal.loads(data)
return data, retcode
def RunPerforceCommand(self, extra_args, marshal_output=False,
universal_newlines=True):
# This might be a good place to cache call results, since things like
# describe or fstat might get called repeatedly.
data, retcode = self.RunPerforceCommandWithReturnCode(
extra_args, marshal_output, universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
return data
# pylint: disable=unused-argument
def GetFileProperties(self, property_key_prefix="", command="describe"):
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
changed_files = {}
file_index = 0
# Try depotFile0, depotFile1, ... until we don't find a match
while True:
file_key = "depotFile%d" % file_index
if file_key in description:
filename = description[file_key]
change_type = description[property_key_prefix + str(file_index)]
changed_files[filename] = change_type
file_index += 1
else:
break
return changed_files
def GetChangedFiles(self):
return self.GetFileProperties("action")
def GetUnknownFiles(self):
# Perforce doesn't detect new files; they have to be explicitly added.
return []
def IsBaseBinary(self, filename):
base_filename = self.GetBaseFilename(filename)
return self.IsBinaryHelper(base_filename, "files")
def IsPendingBinary(self, filename):
return self.IsBinaryHelper(filename, "describe")
def IsBinaryHelper(self, filename, command):
file_types = self.GetFileProperties("type", command)
if filename not in file_types:
ErrorExit("Trying to check binary status of unknown file %s." % filename)
# This treats symlinks, macintosh resource files, temporary objects, and
# unicode as binary. See the Perforce docs for more details:
# http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
return not file_types[filename].endswith("text")
def GetFileContent(self, filename, revision, is_binary):
file_arg = filename
if revision:
file_arg += "#" + revision
# -q suppresses the initial line that displays the filename and revision
return self.RunPerforceCommand(["print", "-q", file_arg],
universal_newlines=not is_binary)
def GetBaseFilename(self, filename):
actionsWithDifferentBases = [
"move/add", # p4 move
"branch", # p4 integrate (to a new file), similar to hg "add"
"add", # p4 integrate (to a new file), after modifying the new file
]
# We only see a different base for "add" if this is a downgraded branch
# after a file was branched (integrated), then edited.
if self.GetAction(filename) in actionsWithDifferentBases:
# -Or shows information about pending integrations/moves
fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
marshal_output=True)
baseFileKey = "resolveFromFile0" # I think it's safe to use only file0
if baseFileKey in fstat_result:
return fstat_result[baseFileKey]
return filename
def GetBaseRevision(self, filename):
base_filename = self.GetBaseFilename(filename)
have_result = self.RunPerforceCommand(["have", base_filename],
marshal_output=True)
if "haveRev" in have_result:
return have_result["haveRev"]
def GetLocalFilename(self, filename):
where = self.RunPerforceCommand(["where", filename], marshal_output=True)
if "path" in where:
return where["path"]
def GenerateDiff(self, args):
class DiffData:
def __init__(self, perforceVCS, filename, action):
self.perforceVCS = perforceVCS
self.filename = filename
self.action = action
self.base_filename = perforceVCS.GetBaseFilename(filename)
self.file_body = None
self.base_rev = None
self.prefix = None
self.working_copy = True
self.change_summary = None
def GenerateDiffHeader(diffData):
header = []
header.append("Index: %s" % diffData.filename)
header.append("=" * 67)
if diffData.base_filename != diffData.filename:
if diffData.action.startswith("move"):
verb = "rename"
else:
verb = "copy"
header.append("%s from %s" % (verb, diffData.base_filename))
header.append("%s to %s" % (verb, diffData.filename))
suffix = "\t(revision %s)" % diffData.base_rev
header.append("--- " + diffData.base_filename + suffix)
if diffData.working_copy:
suffix = "\t(working copy)"
header.append("+++ " + diffData.filename + suffix)
if diffData.change_summary:
header.append(diffData.change_summary)
return header
def GenerateMergeDiff(diffData, args):
# -du generates a unified diff, which is nearly svn format
diffData.file_body = self.RunPerforceCommand(
["diff", "-du", diffData.filename] + args)
diffData.base_rev = self.GetBaseRevision(diffData.filename)
diffData.prefix = ""
# We have to replace p4's file status output (the lines starting
# with +++ or ---) to match svn's diff format
lines = diffData.file_body.splitlines()
first_good_line = 0
while (first_good_line < len(lines) and
not lines[first_good_line].startswith("@@")):
first_good_line += 1
diffData.file_body = "\n".join(lines[first_good_line:])
return diffData
def GenerateAddDiff(diffData):
fstat = self.RunPerforceCommand(["fstat", diffData.filename],
marshal_output=True)
if "headRev" in fstat:
diffData.base_rev = fstat["headRev"] # Re-adding a deleted file
else:
diffData.base_rev = "0" # Brand new file
diffData.working_copy = False
rel_path = self.GetLocalFilename(diffData.filename)
diffData.file_body = open(rel_path, 'r').read()
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -0,0 +1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " @@"
diffData.prefix = "+"
return diffData
def GenerateDeleteDiff(diffData):
diffData.base_rev = self.GetBaseRevision(diffData.filename)
is_base_binary = self.IsBaseBinary(diffData.filename)
# For deletes, base_filename == filename
diffData.file_body = self.GetFileContent(diffData.base_filename,
None,
is_base_binary)
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " +0,0 @@"
diffData.prefix = "-"
return diffData
changed_files = self.GetChangedFiles()
svndiff = []
filecount = 0
for (filename, action) in changed_files.items():
svn_status = self.PerforceActionToSvnStatus(action)
if svn_status == "SKIP":
continue
diffData = DiffData(self, filename, action)
# Is it possible to diff a branched file? Stackoverflow says no:
# http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
if svn_status == "M":
diffData = GenerateMergeDiff(diffData, args)
elif svn_status == "A":
diffData = GenerateAddDiff(diffData)
elif svn_status == "D":
diffData = GenerateDeleteDiff(diffData)
else:
ErrorExit("Unknown file action %s (svn action %s)." % \
(action, svn_status))
svndiff += GenerateDiffHeader(diffData)
for line in diffData.file_body.splitlines():
svndiff.append(diffData.prefix + line)
filecount += 1
if not filecount:
ErrorExit("No valid patches found in output from p4 diff")
return "\n".join(svndiff) + "\n"
def PerforceActionToSvnStatus(self, status):
# Mirroring the list at
# permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
# Is there something more official?
return {
"add" : "A",
"branch" : "A",
"delete" : "D",
"edit" : "M", # Also includes changing file types.
"integrate" : "M",
"move/add" : "M",
"move/delete": "SKIP",
"purge" : "D", # How does a file's status become "purge"?
}[status]
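# Illustrative example (editorial note): PerforceActionToSvnStatus("move/add")
# returns "M" while "move/delete" returns "SKIP", so a moved file is diffed
# once at its new path and the delete half of the move is ignored by
# GenerateDiff above.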
def GetAction(self, filename):
changed_files = self.GetChangedFiles()
if not filename in changed_files:
ErrorExit("Trying to get base version of unknown file %s." % filename)
return changed_files[filename]
def GetBaseFile(self, filename):
base_filename = self.GetBaseFilename(filename)
base_content = ""
new_content = None
status = self.PerforceActionToSvnStatus(self.GetAction(filename))
if status != "A":
revision = self.GetBaseRevision(base_filename)
if not revision:
ErrorExit("Couldn't find base revision for file %s" % filename)
is_base_binary = self.IsBaseBinary(base_filename)
base_content = self.GetFileContent(base_filename,
revision,
is_base_binary)
is_binary = self.IsPendingBinary(filename)
if status != "D" and status != "SKIP":
relpath = self.GetLocalFilename(filename)
if is_binary:
new_content = open(relpath, "rb").read()
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
_, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
_, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
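# A minimal sketch of SplitPatch on a hypothetical two-file svn diff:
#   data = ("Index: a.py\n--- a.py\n+++ a.py\n@@ -1 +1 @@\n-x\n+y\n"
#           "Index: b.py\n--- b.py\n+++ b.py\n@@ -1 +1 @@\n-u\n+v\n")
#   SplitPatch(data) == [("a.py", <first block>), ("b.py", <second block>)]
# where each block is the text from its "Index:" line up to (but excluding)
# the next "Index:" line.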
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
def UploadFile(filename, data):
form_fields = [("filename", filename)]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
try:
response_body = rpc_server.Send(url, body, content_type=ctype)
except urllib2.HTTPError, e:
response_body = ("Failed to upload patch for %s. Got %d status code." %
(filename, e.code))
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
return ("Uploaded patch for " + filename, [lines[1], filename])
threads = []
thread_pool = ThreadPool(options.num_upload_threads)
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
filename = patch[0]
data = patch[1]
t = thread_pool.apply_async(UploadFile, args=(filename, data))
threads.append(t)
for t in threads:
result = t.get(timeout=60)
print result[0]
rv.append(result[1])
return rv
def GuessVCSName(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns a string indicating which VCS is detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
VCS_CVS, or VCS_UNKNOWN.
Since local perforce repositories can't be easily detected, this method
will only guess VCS_PERFORCE if any perforce options have been specified.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
for attribute, value in options.__dict__.iteritems():
if attribute.startswith("p4") and value != None:
return (VCS_PERFORCE, None)
def RunDetectCommand(vcs_type, command):
"""Helper to detect VCS by executing command.
Returns:
A pair (vcs, output) or None. Throws exception on error.
"""
try:
out, returncode = RunShellWithReturnCode(command)
if returncode == 0:
return (vcs_type, out.strip())
except OSError, (errcode, _):
if errcode != errno.ENOENT: # command not found code
raise
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
if res != None:
return res
# Subversion from 1.7 has a single centralized .svn folder
# ( see http://subversion.apache.org/docs/release-notes/1.7.html#wc-ng )
# That's why we use 'svn info' instead of checking for .svn dir
res = RunDetectCommand(VCS_SUBVERSION, ["svn", "info"])
if res != None:
return res
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
"--is-inside-work-tree"])
if res != None:
return res
  # CVS detection: `cvs status` exits with code 0 inside a CVS working copy.
res = RunDetectCommand(VCS_CVS, ["cvs", "status"])
if res != None:
return res
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs_name = options.vcs
if not vcs_name:
vcs_name = os.environ.get("CODEREVIEW_VCS")
if vcs_name:
v = VCS_ABBREVIATIONS.get(vcs_name.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs_name)
(vcs_name, extra_output) = (v, None)
else:
(vcs_name, extra_output) = GuessVCSName(options)
if vcs_name == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs_name == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs_name == VCS_PERFORCE:
return PerforceVCS(options)
elif vcs_name == VCS_GIT:
return GitVCS(options)
elif vcs_name == VCS_CVS:
return CVSVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
"""Validate a reviewer -- either a nickname or an email addres.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
"""Returns the content of [auto-props] section of Subversion's config file as
a dictionary.
Returns:
A dictionary whose key-value pairs correspond to the [auto-props] section's
key-value pairs.
An empty dictionary is returned if:
- the config file doesn't exist, or
- 'enable-auto-props' is not set to a true-like value in [miscellany].
"""
if os.name == 'nt':
subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
else:
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
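# For reference, a Subversion config that this function would pick up looks
# roughly like the following illustrative snippet (not shipped with this
# script):
#   [miscellany]
#   enable-auto-props = yes
#   [auto-props]
#   *.py = svn:eol-style=LF
#   *.sh = svn:eol-style=LF;svn:executable
# which yields {'*.py': [('svn:eol-style', 'LF')],
#               '*.sh': [('svn:eol-style', 'LF'), ('svn:executable', '*')]}.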
def ParseSubversionPropertyValues(props):
"""Parse the given property value which comes from [auto-props] section and
returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
# If value is not given, use '*' as a Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
"""Return a Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
A string like 'Property changes on |filename| ...' if given |filename|
matches any entries in [auto-props] section. None, otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
options, args = parser.parse_args(argv[1:])
if options.help:
if options.verbose < 2:
# hide Perforce options
parser.epilog = (
"Use '--help -v' to show additional Perforce options. "
"For more help, see "
"http://code.google.com/p/rietveld/wiki/CodeReviewHelp"
)
parser.option_groups.remove(parser.get_option_group('--p4_port'))
parser.print_help()
sys.exit(0)
global verbosity
verbosity = options.verbose
if verbosity >= 3:
LOGGER.setLevel(logging.DEBUG)
elif verbosity >= 2:
LOGGER.setLevel(logging.INFO)
vcs = GuessVCS(options)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
if not base and options.download_base:
options.download_base = True
LOGGER.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
data = vcs.PostProcessDiff(data)
if options.print_diffs:
print "Rietveld diff start:*****"
print data
print "Rietveld diff end:*****"
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.use_oauth2:
options.save_cookies = False
rpc_server = GetRpcServer(options.server,
options.email,
options.host,
options.save_cookies,
options.account_type,
options.use_oauth2,
options.oauth2_port,
options.open_oauth2_local_webbrowser)
form_fields = []
repo_guid = vcs.GetGUID()
if repo_guid:
form_fields.append(("repo_guid", repo_guid))
if base:
b = urlparse.urlparse(base)
username, netloc = urllib.splituser(b.netloc)
if username:
LOGGER.info("Removed username from base URL")
base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
b.query, b.fragment))
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
if options.project:
form_fields.append(("project", options.project))
if options.target_ref:
form_fields.append(("target_ref", options.target_ref))
if options.cq_dry_run:
form_fields.append(("cq_dry_run", "1"))
form_fields.append(("commit", "1"))
# Process --message, --title and --file.
message = options.message or ""
title = options.title or ""
if options.file:
if options.message:
ErrorExit("Can't specify both message and message file options")
file_obj = open(options.file, 'r')
message = file_obj.read()
file_obj.close()
if options.issue:
prompt = "Title describing this patch set: "
else:
prompt = "New issue subject: "
title = (
title or message.split('\n', 1)[0].strip() or raw_input(prompt).strip())
if not title and not options.issue:
ErrorExit("A non-empty title is required for a new issue")
# For existing issues, it's fine to give a patchset an empty name. Rietveld
  # doesn't accept that, so use a single space.
title = title or " "
if len(title) > 100:
title = title[:99] + '…'
if title and not options.issue:
message = message or title
form_fields.append(("subject", title))
# If it's a new issue send message as description. Otherwise a new
# message is created below on upload_complete.
if message and not options.issue:
form_fields.append(("description", message))
  # Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for filename, info in files.iteritems():
if not info[0] is None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + filename
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
if options.send_patch:
options.send_mail = True
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
payload = {} # payload for final request
if options.send_mail:
payload["send_mail"] = "yes"
if options.send_patch:
payload["attach_patch"] = "yes"
if options.issue and message:
payload["message"] = message
payload = urllib.urlencode(payload)
rpc_server.Send("/" + issue + "/upload_complete/" + (patchset or ""),
payload=payload)
return issue, patchset
def main():
try:
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| nicko96/Chrome-Infra | appengine/chromium_rietveld/upload.py | Python | bsd-3-clause | 100,822 | ["VisIt"] | a5aa5356014496c5d2ef63b7f023532caa8dfdb83b62b1af44d783d60916285d |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import logging
import numpy as np
import time
from pymatgen.core.structure import Structure
from pymatgen.core.sites import PeriodicSite
from monty.json import MSONable
from scipy.spatial import Voronoi
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import my_solid_angle
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import get_lower_and_upper_f
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import rectangle_surface_intersection
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.analysis.chemenv.utils.math_utils import normal_cdf_step
"""
This module contains the object used to describe the possible bonded atoms based on a Voronoi analysis
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
def from_bson_voronoi_list(bson_nb_voro_list, structure):
"""
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list.
:param bson_nb_voro_list: bson-encoded voronoi list
:param structure: Structure object used to rebuild the PeriodicSites
:return: The voronoi_list needed for the VoronoiContainer (with PeriodicSites, which are not
allowed in the BSON format, rebuilt from the structure)
"""
voronoi_list = [None] * len(bson_nb_voro_list)
for isite, voro in enumerate(bson_nb_voro_list):
if voro is None or voro == 'None':
continue
voronoi_list[isite] = []
for psd, dd in voro:
struct_site = structure[dd['index']]
periodic_site = PeriodicSite(struct_site._species, struct_site.frac_coords + psd[1],
struct_site._lattice, properties=struct_site._properties)
voronoi_list[isite].append((periodic_site, dd))
return voronoi_list
def from_bson_voronoi_list2(bson_nb_voro_list2, structure):
"""
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list2.
:param bson_nb_voro_list2: bson-encoded voronoi list
:param structure: Structure object used to rebuild the PeriodicSites
:return: The voronoi_list needed for the VoronoiContainer (with PeriodicSites, which are not
allowed in the BSON format, rebuilt from the structure)
"""
voronoi_list = [None] * len(bson_nb_voro_list2)
for isite, voro in enumerate(bson_nb_voro_list2):
if voro is None or voro == 'None':
continue
voronoi_list[isite] = []
for psd, dd in voro:
struct_site = structure[dd['index']]
periodic_site = PeriodicSite(struct_site._species, struct_site.frac_coords + psd[1],
struct_site._lattice, properties=struct_site._properties)
dd['site'] = periodic_site
voronoi_list[isite].append(dd)
return voronoi_list
class DetailedVoronoiContainer(MSONable):
"""
Class used to store the full Voronoi of a given structure.
"""
AC = AdditionalConditions()
default_voronoi_cutoff = 10.0
default_normalized_distance_tolerance = 1e-5
default_normalized_angle_tolerance = 1e-3
def __init__(self, structure=None, voronoi_list=None, voronoi_list2=None,
voronoi_cutoff=default_voronoi_cutoff, isites=None,
normalized_distance_tolerance=default_normalized_distance_tolerance,
normalized_angle_tolerance=default_normalized_angle_tolerance,
additional_conditions=None, valences=None,
maximum_distance_factor=None, minimum_angle_factor=None):
"""
Constructor for the VoronoiContainer object. Either a structure is given, in which case the Voronoi is
computed, or the different components of the VoronoiContainer are given (used in the from_dict method)
:param structure: Structure for which the Voronoi is computed
:param voronoi_list: List of voronoi polyhedrons for each site
:param voronoi_cutoff: cutoff used for the voronoi
:param isites: indices of sites for which the Voronoi has to be computed
:raise: RuntimeError if the Voronoi cannot be constructed
"""
self.normalized_distance_tolerance = normalized_distance_tolerance
self.normalized_angle_tolerance = normalized_angle_tolerance
if additional_conditions is None:
self.additional_conditions = [self.AC.NONE, self.AC.ONLY_ACB]
else:
self.additional_conditions = additional_conditions
self.valences = valences
self.maximum_distance_factor = maximum_distance_factor
self.minimum_angle_factor = minimum_angle_factor
if isites is None:
indices = list(range(len(structure)))
else:
indices = isites
self.structure = structure
logging.info('Setting Voronoi list')
if voronoi_list2 is not None:
self.voronoi_list2 = voronoi_list2
else:
self.setup_voronoi_list(indices=indices, voronoi_cutoff=voronoi_cutoff)
logging.info('Setting neighbors distances and angles')
t1 = time.clock()
self.setup_neighbors_distances_and_angles(indices=indices)
t2 = time.clock()
logging.info('Neighbors distances and angles set up in {:.2f} seconds'.format(t2-t1))
def setup_voronoi_list(self, indices, voronoi_cutoff):
"""
Sets up the Voronoi list of neighbours by calling qhull.
:param indices: indices of the sites for which the Voronoi is needed
:param voronoi_cutoff: Voronoi cutoff for the search of neighbours
:raise RuntimeError: If an infinite vertex is found in the voronoi construction
"""
self.voronoi_list2 = [None] * len(self.structure)
self.voronoi_list_coords = [None] * len(self.structure)
logging.info('Getting all neighbors in structure')
struct_neighbors = self.structure.get_all_neighbors(voronoi_cutoff, include_index=True)
t1 = time.clock()
logging.info('Setting up Voronoi list :')
for jj, isite in enumerate(indices):
logging.info(' - Voronoi analysis for site #{:d} ({:d}/{:d})'.format(isite, jj+1, len(indices)))
site = self.structure[isite]
neighbors1 = [(site, 0.0, isite)]
neighbors1.extend(struct_neighbors[isite])
distances = [i[1] for i in sorted(neighbors1, key=lambda s: s[1])]
neighbors = [i[0] for i in sorted(neighbors1, key=lambda s: s[1])]
qvoronoi_input = [s.coords for s in neighbors]
voro = Voronoi(points=qvoronoi_input, qhull_options="o Fv")
all_vertices = voro.vertices
results2 = []
maxangle = 0.0
mindist = 10000.0
for iridge, ridge_points in enumerate(voro.ridge_points):
if 0 in ridge_points:
ridge_vertices_indices = voro.ridge_vertices[iridge]
if -1 in ridge_vertices_indices:
raise RuntimeError("This structure is pathological,"
" infinite vertex in the voronoi "
"construction")
ridge_point2 = max(ridge_points)
facets = [all_vertices[i] for i in ridge_vertices_indices]
sa = my_solid_angle(site.coords, facets)
maxangle = max([sa, maxangle])
mindist = min([mindist, distances[ridge_point2]])
for iii, sss in enumerate(self.structure):
if neighbors[ridge_point2].is_periodic_image(sss):
myindex = iii
break
results2.append({'site': neighbors[ridge_point2],
'angle': sa,
'distance': distances[ridge_point2],
'index': myindex})
for dd in results2:
dd['normalized_angle'] = dd['angle'] / maxangle
dd['normalized_distance'] = dd['distance'] / mindist
self.voronoi_list2[isite] = results2
self.voronoi_list_coords[isite] = np.array([dd['site'].coords for dd in results2])
t2 = time.clock()
logging.info('Voronoi list set up in {:.2f} seconds'.format(t2-t1))
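# A small worked example of the normalization above (editorial note): if the
# closest neighbor of a site sits at mindist = 2.0 and the largest Voronoi
# facet subtends a solid angle maxangle = 2.5, then a neighbor at distance 2.5
# whose facet subtends 1.25 gets
#   normalized_distance = 2.5 / 2.0 = 1.25
#   normalized_angle = 1.25 / 2.5 = 0.5
# so the closest neighbor always has normalized_distance = 1.0 and the
# largest-facet neighbor always has normalized_angle = 1.0.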
def setup_neighbors_distances_and_angles(self, indices):
"""
Initializes the angle and distance separations
:param indices: indices of the sites for which the Voronoi is needed
"""
self.neighbors_distances = [None] * len(self.structure)
self.neighbors_normalized_distances = [None] * len(self.structure)
self.neighbors_angles = [None] * len(self.structure)
self.neighbors_normalized_angles = [None] * len(self.structure)
for isite in indices:
results = self.voronoi_list2[isite]
if results is None:
continue
#Initializes neighbors distances and normalized distances groups
self.neighbors_distances[isite] = []
self.neighbors_normalized_distances[isite] = []
normalized_distances = [nb_dict['normalized_distance'] for nb_dict in results]
isorted_distances = np.argsort(normalized_distances)
self.neighbors_normalized_distances[isite].append({'min': normalized_distances[isorted_distances[0]],
'max': normalized_distances[isorted_distances[0]]})
self.neighbors_distances[isite].append({'min': results[isorted_distances[0]]['distance'],
'max': results[isorted_distances[0]]['distance']})
icurrent = 0
nb_indices = {int(isorted_distances[0])}
dnb_indices = {int(isorted_distances[0])}
for idist in iter(isorted_distances):
wd = normalized_distances[idist]
if self.maximum_distance_factor is not None:
if wd > self.maximum_distance_factor:
self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
break
if np.isclose(wd, self.neighbors_normalized_distances[isite][icurrent]['max'],
rtol=0.0, atol=self.normalized_distance_tolerance):
self.neighbors_normalized_distances[isite][icurrent]['max'] = wd
self.neighbors_distances[isite][icurrent]['max'] = results[idist]['distance']
dnb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
dnb_indices = {int(idist)}
self.neighbors_normalized_distances[isite].append({'min': wd,
'max': wd})
self.neighbors_distances[isite].append({'min': results[idist]['distance'],
'max': results[idist]['distance']})
icurrent += 1
nb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
for idist in range(len(self.neighbors_distances[isite]) - 1):
dist_dict = self.neighbors_distances[isite][idist]
dist_dict_next = self.neighbors_distances[isite][idist+1]
dist_dict['next'] = dist_dict_next['min']
ndist_dict = self.neighbors_normalized_distances[isite][idist]
ndist_dict_next = self.neighbors_normalized_distances[isite][idist + 1]
ndist_dict['next'] = ndist_dict_next['min']
if self.maximum_distance_factor is not None:
dfact = self.maximum_distance_factor
else:
dfact = self.default_voronoi_cutoff / self.neighbors_distances[isite][0]['min']
self.neighbors_normalized_distances[isite][-1]['next'] = dfact
self.neighbors_distances[isite][-1]['next'] = dfact * self.neighbors_distances[isite][0]['min']
#Initializes neighbors angles and normalized angles groups
self.neighbors_angles[isite] = []
self.neighbors_normalized_angles[isite] = []
normalized_angles = [nb_dict['normalized_angle'] for nb_dict in results]
isorted_angles = np.argsort(normalized_angles)[::-1]
self.neighbors_normalized_angles[isite].append({'max': normalized_angles[isorted_angles[0]],
'min': normalized_angles[isorted_angles[0]]})
self.neighbors_angles[isite].append({'max': results[isorted_angles[0]]['angle'],
'min': results[isorted_angles[0]]['angle']})
icurrent = 0
nb_indices = {int(isorted_angles[0])}
dnb_indices = {int(isorted_angles[0])}
for iang in iter(isorted_angles):
wa = normalized_angles[iang]
if self.minimum_angle_factor is not None:
if wa < self.minimum_angle_factor:
self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
break
if np.isclose(wa, self.neighbors_normalized_angles[isite][icurrent]['min'],
rtol=0.0, atol=self.normalized_angle_tolerance):
self.neighbors_normalized_angles[isite][icurrent]['min'] = wa
self.neighbors_angles[isite][icurrent]['min'] = results[iang]['angle']
dnb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
dnb_indices = {int(iang)}
self.neighbors_normalized_angles[isite].append({'max': wa,
'min': wa})
self.neighbors_angles[isite].append({'max': results[iang]['angle'],
'min': results[iang]['angle']})
icurrent += 1
nb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
for iang in range(len(self.neighbors_angles[isite]) - 1):
ang_dict = self.neighbors_angles[isite][iang]
ang_dict_next = self.neighbors_angles[isite][iang + 1]
ang_dict['next'] = ang_dict_next['max']
nang_dict = self.neighbors_normalized_angles[isite][iang]
nang_dict_next = self.neighbors_normalized_angles[isite][iang + 1]
nang_dict['next'] = nang_dict_next['max']
if self.minimum_angle_factor is not None:
afact = self.minimum_angle_factor
else:
afact = 0.0
self.neighbors_normalized_angles[isite][-1]['next'] = afact
self.neighbors_angles[isite][-1]['next'] = afact * self.neighbors_angles[isite][0]['max']
def _precompute_additional_conditions(self, ivoronoi, voronoi, valences):
additional_conditions = {ac: [] for ac in self.additional_conditions}
for ips, (ps, vals) in enumerate(voronoi):
for ac in self.additional_conditions:
additional_conditions[ac].append(self.AC.check_condition(condition=ac, structure=self.structure,
parameters={'valences': valences,
'neighbor_index': vals['index'],
'site_index': ivoronoi}))
return additional_conditions
def _precompute_distance_conditions(self, ivoronoi, voronoi):
distance_conditions = []
for idp, dp_dict in enumerate(self.neighbors_normalized_distances[ivoronoi]):
distance_conditions.append([])
dp = dp_dict['max']
for ips, (ps, vals) in enumerate(voronoi):
distance_conditions[idp].append(vals['normalized_distance'] <= dp or
np.isclose(vals['normalized_distance'], dp,
rtol=0.0, atol=self.normalized_distance_tolerance/2.0))
return distance_conditions
def _precompute_angle_conditions(self, ivoronoi, voronoi):
angle_conditions = []
for iap, ap_dict in enumerate(self.neighbors_normalized_angles[ivoronoi]):
angle_conditions.append([])
ap = ap_dict['max']
for ips, (ps, vals) in enumerate(voronoi):
angle_conditions[iap].append(vals['normalized_angle'] >= ap or
np.isclose(vals['normalized_angle'], ap,
rtol=0.0, atol=self.normalized_angle_tolerance/2.0))
return angle_conditions
def neighbors_map(self, isite, distfactor, angfactor, additional_condition):
if self.neighbors_normalized_distances[isite] is None:
return None
dist_where = np.argwhere(np.array([wd['min'] for wd in self.neighbors_normalized_distances[isite]]) <= distfactor)
if len(dist_where) == 0:
return None
idist = dist_where[-1][0]
ang_where = np.argwhere(np.array([wa['max'] for wa in self.neighbors_normalized_angles[isite]]) >= angfactor)
if len(ang_where) == 0:
return None
iang = ang_where[0][0]
if self.additional_conditions.count(additional_condition) != 1:
return None
i_additional_condition = self.additional_conditions.index(additional_condition)
return {'i_distfactor': idist, 'i_angfactor': iang, 'i_additional_condition': i_additional_condition}
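# Usage sketch with hypothetical values: for a site whose normalized distance
# groups have 'min' values [1.0, 1.15, 1.4] and whose normalized angle groups
# have 'max' values [1.0, 0.6, 0.3], calling
#   neighbors_map(isite, distfactor=1.2, angfactor=0.5,
#                 additional_condition=DetailedVoronoiContainer.AC.ONLY_ACB)
# returns {'i_distfactor': 1, 'i_angfactor': 0, 'i_additional_condition': 1}
# (the 1 for the additional condition assumes the default additional_conditions
# list [AC.NONE, AC.ONLY_ACB]): the last distance group starting at or below
# the distance factor, and the first angle group whose maximum reaches the
# angle factor.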
def neighbors_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0):
if self.voronoi_list2[isite] is None:
return None
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite, surface_calculation_type, max_dist)
distance_bounds = bounds_and_limits['distance_bounds']
angle_bounds = bounds_and_limits['angle_bounds']
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float)
for idp in range(len(distance_bounds) - 1):
this_dist_plateau = distance_bounds[idp + 1] - distance_bounds[idp]
for iap in range(len(angle_bounds) - 1):
this_ang_plateau = angle_bounds[iap + 1] - angle_bounds[iap]
surfaces[idp][iap] = np.absolute(this_dist_plateau*this_ang_plateau)
return surfaces
def neighbors_surfaces_bounded(self, isite, surface_calculation_options=None):
if self.voronoi_list2[isite] is None:
return None
if surface_calculation_options is None:
surface_calculation_options = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.2, 'upper': 1.8},
'angle_bounds': {'lower': 0.1, 'upper': 0.8}}
if surface_calculation_options['type'] in ['standard_elliptic', 'standard_diamond', 'standard_spline']:
plot_type = {'distance_parameter': ('initial_normalized', None),
'angle_parameter': ('initial_normalized', None)}
else:
raise ValueError('Type "{}" for the surface calculation in DetailedVoronoiContainer '
'is invalid'.format(surface_calculation_options['type']))
max_dist = surface_calculation_options['distance_bounds']['upper'] + 0.1
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite=isite,
plot_type=plot_type,
max_dist=max_dist)
distance_bounds = bounds_and_limits['distance_bounds']
angle_bounds = bounds_and_limits['angle_bounds']
lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_calculation_options)
mindist = surface_calculation_options['distance_bounds']['lower']
maxdist = surface_calculation_options['distance_bounds']['upper']
minang = surface_calculation_options['angle_bounds']['lower']
maxang = surface_calculation_options['angle_bounds']['upper']
f_lower = lower_and_upper_functions['lower']
f_upper = lower_and_upper_functions['upper']
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float)
for idp in range(len(distance_bounds) - 1):
dp1 = distance_bounds[idp]
dp2 = distance_bounds[idp+1]
if dp2 < mindist or dp1 > maxdist:
continue
if dp1 < mindist:
d1 = mindist
else:
d1 = dp1
if dp2 > maxdist:
d2 = maxdist
else:
d2 = dp2
for iap in range(len(angle_bounds) - 1):
ap1 = angle_bounds[iap]
ap2 = angle_bounds[iap+1]
if ap1 > ap2:
ap1 = angle_bounds[iap + 1]
ap2 = angle_bounds[iap]
if ap2 < minang or ap1 > maxang:
continue
intersection, interror = rectangle_surface_intersection(rectangle=((d1, d2),
(ap1, ap2)),
f_lower=f_lower,
f_upper=f_upper,
bounds_lower=[mindist, maxdist],
bounds_upper=[mindist, maxdist],
check=False)
surfaces[idp][iap] = intersection
return surfaces
@staticmethod
def _get_vertices_dist_ang_indices(parameter_indices_list):
pp0 = [pp[0] for pp in parameter_indices_list]
pp1 = [pp[1] for pp in parameter_indices_list]
min_idist = min(pp0)
min_iang = min(pp1)
max_idist = max(pp0)
max_iang = max(pp1)
i_min_angs = np.argwhere(np.array(pp1) == min_iang)
i_max_dists = np.argwhere(np.array(pp0) == max_idist)
pp0_at_min_iang = [pp0[ii[0]] for ii in i_min_angs]
pp1_at_max_idist = [pp1[ii[0]] for ii in i_max_dists]
max_idist_at_min_iang = max(pp0_at_min_iang)
min_iang_at_max_idist = min(pp1_at_max_idist)
p1 = (min_idist, min_iang)
p2 = (max_idist_at_min_iang, min_iang)
p3 = (max_idist_at_min_iang, min_iang_at_max_idist)
p4 = (max_idist, min_iang_at_max_idist)
p5 = (max_idist, max_iang)
p6 = (min_idist, max_iang)
return [p1, p2, p3, p4, p5, p6]
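# Worked example (editorial): for parameter_indices_list
#   [(0, 0), (1, 0), (1, 1), (2, 1)]
# the distance indices span 0..2 and the angle indices span 0..1; the largest
# distance index reached at the minimum angle index is 1, and the smallest
# angle index reached at the maximum distance index is 1, so the method
# returns the six envelope vertices
#   [(0, 0), (1, 0), (1, 1), (2, 1), (2, 1), (0, 1)]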
def maps_and_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0, additional_conditions=None):
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces(isite=isite, surface_calculation_type=surface_calculation_type,
max_dist=max_dist)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items():
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append({'map': (cn, imap), 'surface': thissurf,
'parameters_indices': list_parameters_indices})
return maps_and_surfaces
def maps_and_surfaces_bounded(self, isite, surface_calculation_options=None, additional_conditions=None):
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces_bounded(isite=isite, surface_calculation_options=surface_calculation_options)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items():
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append({'map': (cn, imap), 'surface': thissurf,
'parameters_indices': list_parameters_indices})
return maps_and_surfaces
def neighbors(self, isite, distfactor, angfactor, additional_condition=None):
idist = None
dfact = None
for iwd, wd in enumerate(self.neighbors_normalized_distances[isite]):
if distfactor >= wd['min']:
idist = iwd
dfact = wd['max']
else:
break
iang = None
afact = None
for iwa, wa in enumerate(self.neighbors_normalized_angles[isite]):
if angfactor <= wa['max']:
iang = iwa
afact = wa['min']
else:
break
if idist is None or iang is None:
raise ValueError('Distance or angle parameter not found ...')
return [nb for nb in self.voronoi_list2[isite] if
nb['normalized_distance'] <= dfact and nb['normalized_angle'] >= afact]
def voronoi_parameters_bounds_and_limits(self, isite, plot_type, max_dist):
#Initializes the distance and angle parameters
if self.voronoi_list2[isite] is None:
return None
if plot_type is None:
plot_type = {'distance_parameter': ('initial_inverse_opposite', None),
'angle_parameter': ('initial_opposite', None)}
dd = [dist['min'] for dist in self.neighbors_normalized_distances[isite]]
dd[0] = 1.0
if plot_type['distance_parameter'][0] == 'initial_normalized':
dd.append(max_dist)
distance_bounds = np.array(dd)
dist_limits = [1.0, max_dist]
elif plot_type['distance_parameter'][0] == 'initial_inverse_opposite':
ddinv = [1.0 / dist for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
elif plot_type['distance_parameter'][0] == 'initial_inverse3_opposite':
ddinv = [1.0 / dist**3.0 for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
else:
raise NotImplementedError('Plotting type "{}" '
'for the distance is not implemented'.format(plot_type['distance_parameter']))
if plot_type['angle_parameter'][0] == 'initial_normalized':
aa = [0.0]
aa.extend([ang['max'] for ang in self.neighbors_normalized_angles[isite]])
angle_bounds = np.array(aa)
elif plot_type['angle_parameter'][0] == 'initial_opposite':
aa = [0.0]
aa.extend([ang['max'] for ang in self.neighbors_normalized_angles[isite]])
aa = [1.0 - ang for ang in aa]
angle_bounds = np.array(aa)
else:
raise NotImplementedError('Plotting type "{}" '
'for the angle is not implemented'.format(plot_type['angle_parameter']))
ang_limits = [0.0, 1.0]
return {'distance_bounds': distance_bounds, 'distance_limits': dist_limits,
'angle_bounds': angle_bounds, 'angle_limits': ang_limits}
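# Numeric sketch of the 'initial_inverse_opposite' mapping above (editorial
# example): with normalized distance group minima dd = [1.0, 1.2], the
# inverses are [1.0, 0.8333..., 0.0] (the trailing 0.0 standing for infinite
# distance), so distance_bounds = [0.0, 0.1666..., 1.0]: the whole
# 1 <= d < infinity range is mapped onto the [0, 1] plotting interval.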
def is_close_to(self, other, rtol=0.0, atol=1e-8):
isclose = (np.isclose(self.normalized_angle_tolerance, other.normalized_angle_tolerance,
rtol=rtol, atol=atol) and
np.isclose(self.normalized_distance_tolerance, other.normalized_distance_tolerance,
rtol=rtol, atol=atol) and
self.additional_conditions == other.additional_conditions and
self.valences == other.valences)
if not isclose:
return isclose
for isite, site_voronoi in enumerate(self.voronoi_list2):
self_to_other_nbs = {}
for inb, nb in enumerate(site_voronoi):
if nb is None:
if other.voronoi_list2[isite] is None:
continue
else:
return False
else:
if other.voronoi_list2[isite] is None:
return False
nb_other = None
for inb2, nb2 in enumerate(other.voronoi_list2[isite]):
if nb['site'] == nb2['site']:
self_to_other_nbs[inb] = inb2
nb_other = nb2
break
if nb_other is None:
return False
if not np.isclose(nb['distance'], nb_other['distance'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['angle'], nb_other['angle'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['normalized_distance'], nb_other['normalized_distance'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['normalized_angle'], nb_other['normalized_angle'],
rtol=rtol, atol=atol):
return False
if nb['index'] != nb_other['index']:
return False
if nb['site'] != nb_other['site']:
return False
return True
def get_rdf_figure(self, isite, normalized=True, figsize=None,
step_function=None):
def dp_func(dp):
return 1.0 - 1.0 / np.power(dp, 3.0)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {'type': 'normal_cdf', 'scale': 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
dists = self.neighbors_normalized_distances[isite]
else:
dists = self.neighbors_distances[isite]
if step_function['type'] == 'step_function':
isorted = np.argsort([dd['min'] for dd in dists])
sorted_dists = [dists[ii]['min'] for ii in isorted]
dnb_dists = [len(dists[ii]['dnb_indices']) for ii in isorted]
xx = [0.0]
yy = [0.0]
for idist, dist in enumerate(sorted_dists):
xx.append(dist)
xx.append(dist)
yy.append(yy[-1])
yy.append(yy[-1]+dnb_dists[idist])
xx.append(1.1*xx[-1])
yy.append(yy[-1])
elif step_function['type'] == 'normal_cdf':
scale = step_function['scale']
mydists = [dp_func(dd['min']) for dd in dists]
mydcns = [len(dd['dnb_indices']) for dd in dists]
xx = np.linspace(0.0, 1.1*max(mydists), num=500)
yy = np.zeros_like(xx)
for idist, dist in enumerate(mydists):
yy += mydcns[idist] * normal_cdf_step(xx, mean=dist, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function['type']))
subplot.plot(xx, yy)
return fig
def get_sadf_figure(self, isite, normalized=True, figsize=None,
step_function=None):
def ap_func(ap):
return np.power(ap, -0.1)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {'type': 'step_function', 'scale': 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
angs = self.neighbors_normalized_angles[isite]
else:
angs = self.neighbors_angles[isite]
if step_function['type'] == 'step_function':
isorted = np.argsort([ap_func(aa['min']) for aa in angs])
sorted_angs = [ap_func(angs[ii]['min']) for ii in isorted]
dnb_angs = [len(angs[ii]['dnb_indices']) for ii in isorted]
xx = [0.0]
yy = [0.0]
for iang, ang in enumerate(sorted_angs):
xx.append(ang)
xx.append(ang)
yy.append(yy[-1])
yy.append(yy[-1]+dnb_angs[iang])
xx.append(1.1*xx[-1])
yy.append(yy[-1])
elif step_function['type'] == 'normal_cdf':
scale = step_function['scale']
myangs = [ap_func(aa['min']) for aa in angs]
mydcns = [len(dd['dnb_indices']) for dd in angs]
xx = np.linspace(0.0, 1.1*max(myangs), num=500)
yy = np.zeros_like(xx)
for iang, ang in enumerate(myangs):
yy += mydcns[iang] * normal_cdf_step(xx, mean=ang, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function['type']))
subplot.plot(xx, yy)
return fig
def __eq__(self, other):
return (self.normalized_angle_tolerance == other.normalized_angle_tolerance and
self.normalized_distance_tolerance == other.normalized_distance_tolerance and
self.additional_conditions == other.additional_conditions and
self.valences == other.valences and
self.voronoi_list2 == other.voronoi_list2 and
self.structure == other.structure)
def __ne__(self, other):
return not self == other
def to_bson_voronoi_list2(self):
"""
Transforms the voronoi_list2 into a bson_nb_voro_list2 that is BSON-encodable.
:return: bson_nb_voro_list2, to be used in the as_dict method
"""
bson_nb_voro_list2 = [None] * len(self.voronoi_list2)
for ivoro, voro in enumerate(self.voronoi_list2):
if voro is None or voro == 'None':
continue
site_voro = []
# {'site': neighbors[nn[1]],
# 'angle': sa,
# 'distance': distances[nn[1]],
# 'index': myindex}
for nb_dict in voro:
site = nb_dict['site']
site_dict = {key: val for key, val in nb_dict.items() if key not in ['site']}
#site_voro.append([ps.as_dict(), dd]) [float(c) for c in self._fcoords]
diff = site._fcoords - self.structure[nb_dict['index']]._fcoords
site_voro.append([[nb_dict['index'], [float(c) for c in diff]],
site_dict])
bson_nb_voro_list2[ivoro] = site_voro
return bson_nb_voro_list2
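# Round-trip sketch (editorial note): each neighbor entry is stored as
#   [[index_in_structure, image_jump], {'angle': ..., 'distance': ..., ...}]
# where image_jump = site frac_coords - structure[index] frac_coords, so that
# from_bson_voronoi_list2 (defined at module level above) can rebuild each
# PeriodicSite as structure[index] translated by image_jump.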
def as_dict(self):
"""
Bson-serializable dict representation of the VoronoiContainer.
:return: dictionary that is BSON-encodable
"""
bson_nb_voro_list2 = self.to_bson_voronoi_list2()
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"bson_nb_voro_list2": bson_nb_voro_list2,
# "neighbors_lists": self.neighbors_lists,
"structure": self.structure.as_dict(),
"normalized_angle_tolerance": self.normalized_angle_tolerance,
"normalized_distance_tolerance": self.normalized_distance_tolerance,
"additional_conditions": self.additional_conditions,
"valences": self.valences,
"maximum_distance_factor": self.maximum_distance_factor,
"minimum_angle_factor": self.minimum_angle_factor}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
:param d: dict representation of the VoronoiContainer object
:return: VoronoiContainer object
"""
structure = Structure.from_dict(d['structure'])
voronoi_list2 = from_bson_voronoi_list2(d['bson_nb_voro_list2'], structure)
maximum_distance_factor = d['maximum_distance_factor'] if 'maximum_distance_factor' in d else None
minimum_angle_factor = d['minimum_angle_factor'] if 'minimum_angle_factor' in d else None
return cls(structure=structure, voronoi_list2=voronoi_list2,
# neighbors_lists=neighbors_lists,
normalized_angle_tolerance=d['normalized_angle_tolerance'],
normalized_distance_tolerance=d['normalized_distance_tolerance'],
additional_conditions=d['additional_conditions'],
valences=d['valences'],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor)
| czhengsci/pymatgen | pymatgen/analysis/chemenv/coordination_environments/voronoi.py | Python | mit | 40,650 | ["pymatgen"] | b501a3e98e18cef05c823d435fb8ead845086feeddf92c4d7a98f5323ed3818b |
from nanopore.analyses.abstractAnalysis import AbstractAnalysis
from nanopore.analyses.utils import AlignedPair, getFastaDictionary, getFastqDictionary, samIterator
import os
import numpy
import pysam
import xml.etree.cElementTree as ET
from jobTree.src.bioio import reverseComplement, fastaRead, fastqRead, prettyXml, system
from itertools import chain
class ReadAlignmentCoverageCounter:
"""Counts coverage from a pairwise alignment.
If globalAlignment is set, coverage is computed over the entire reference and read sequences, so trailing indels are counted.
"""
def __init__(self, readSeqName, readSeq, refSeqName, refSeq, alignedRead, globalAlignment=False):
self.matches = 0
self.mismatches = 0
self.ns = 0
self.totalReadInsertionLength = 0
self.totalReadInsertions = 0
self.totalReadDeletionLength = 0
self.totalReadDeletions = 0
self.readSeqName = readSeqName
self.readSeq = readSeq
self.refSeqName = refSeqName
self.refSeq = refSeq
self.globalAlignment = globalAlignment
#Now process the read alignment
totalReadInsertionLength, totalReadDeletionLength = 0, 0
aP = None
for aP in AlignedPair.iterator(alignedRead, self.refSeq, self.readSeq):
if aP.isMatch():
self.matches += 1
elif aP.isMismatch():
self.mismatches += 1
else:
self.ns += 1
if aP.getPrecedingReadInsertionLength(self.globalAlignment) > 0:
self.totalReadInsertions += 1
totalReadInsertionLength += aP.getPrecedingReadInsertionLength(self.globalAlignment)
if aP.getPrecedingReadDeletionLength(self.globalAlignment) > 0:
self.totalReadDeletions += 1
totalReadDeletionLength += aP.getPrecedingReadDeletionLength(self.globalAlignment)
if self.globalAlignment and aP != None: #If global alignment account for any trailing indels
assert len(self.refSeq) - aP.refPos - 1 >= 0
if len(self.refSeq) - aP.refPos - 1 > 0:
self.totalReadDeletions += 1
self.totalReadDeletionLength += len(self.refSeq) - aP.refPos - 1
if alignedRead.is_reverse:
assert aP.readPos >= 0
if aP.readPos > 0:
self.totalReadInsertions += 1
totalReadInsertionLength += aP.readPos
else:
assert len(self.readSeq) - aP.readPos - 1 >= 0
if len(self.readSeq) - aP.readPos - 1 > 0:
self.totalReadInsertions += 1
totalReadInsertionLength += len(self.readSeq) - aP.readPos - 1
assert totalReadInsertionLength <= len(self.readSeq)
assert totalReadDeletionLength <= len(self.refSeq)
self.totalReadInsertionLength += totalReadInsertionLength
self.totalReadDeletionLength += totalReadDeletionLength
def readCoverage(self):
return AbstractAnalysis.formatRatio(self.matches + self.mismatches, self.matches + self.mismatches + self.totalReadInsertionLength)
def referenceCoverage(self):
return AbstractAnalysis.formatRatio(self.matches + self.mismatches, self.matches + self.mismatches + self.totalReadDeletionLength)
def identity(self):
return AbstractAnalysis.formatRatio(self.matches, self.matches + self.mismatches + self.totalReadInsertionLength)
def mismatchesPerReadBase(self):
return AbstractAnalysis.formatRatio(self.mismatches, self.matches + self.mismatches)
def deletionsPerReadBase(self):
return AbstractAnalysis.formatRatio(self.totalReadDeletions, self.matches + self.mismatches)
def insertionsPerReadBase(self):
return AbstractAnalysis.formatRatio(self.totalReadInsertions, self.matches + self.mismatches)
def readLength(self):
return len(self.readSeq)
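# Worked numbers for the ratios above (editorial example): an alignment with
# 90 matches, 5 mismatches, 5 inserted read bases and 10 deleted reference
# bases gives
#   readCoverage      = 95 / 100 = 0.95
#   referenceCoverage = 95 / 105 = 0.905 (approximately)
#   identity          = 90 / 100 = 0.90
#   mismatchesPerReadBase = 5 / 95 = 0.053 (approximately)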
def getXML(self):
return ET.Element("readAlignmentCoverage", { "refSeqName":self.refSeqName,
"readSeqName":self.readSeqName, "readLength":str(self.readLength()),
"readCoverage":str(self.readCoverage()),
"referenceCoverage":str(self.referenceCoverage()),
"identity":str(self.identity()),
"mismatchesPerReadBase":str(self.mismatchesPerReadBase()),
"insertionsPerReadBase":str(self.insertionsPerReadBase()),
"deletionsPerReadBase":str(self.deletionsPerReadBase()) })
def getAggregateCoverageStats(readAlignmentCoverages, tagName, refSequences, readSequences, readsToReadAlignmentCoverages, typeof):
"""Calculates aggregate stats across a set of read alignments, plots distributions.
"""
if typeof == "coverage_all":
mappedReadLengths = [ [len(readSequences[i])] * len(readsToReadAlignmentCoverages[i]) for i in readSequences.keys() if i in readsToReadAlignmentCoverages ]
mappedReadLengths = list(chain(*mappedReadLengths))
else:
mappedReadLengths = [ len(readSequences[i]) for i in readSequences.keys() if i in readsToReadAlignmentCoverages ]
unmappedReadLengths = [ len(readSequences[i]) for i in readSequences.keys() if i not in readsToReadAlignmentCoverages ]
def stats(fnStringName):
l = map(lambda x : getattr(x, fnStringName)(), readAlignmentCoverages)
l2 = l[:]
l2.sort()
return l2[0], numpy.average(l2), numpy.median(l2), l2[-1], " ".join(map(str, l))
attribs = { "numberOfReadAlignments":str(len(readAlignmentCoverages)),
"numberOfReads":str(len(readSequences)), "numberOfReferenceSequences":str(len(refSequences)),
"numberOfMappedReads":str(len(mappedReadLengths)), "mappedReadLengths":" ".join(map(str, mappedReadLengths)),
"numberOfUnmappedReads":str(len(unmappedReadLengths)), "unmappedReadLengths":" ".join(map(str, unmappedReadLengths)), }
for fnStringName in "readCoverage", "referenceCoverage", "identity", "mismatchesPerReadBase", "deletionsPerReadBase", "insertionsPerReadBase", "readLength":
for attribName, value in zip([ "min" + fnStringName, "avg" + fnStringName, "median" + fnStringName, "max" + fnStringName,
"distribution" + fnStringName ], list(stats(fnStringName))):
attribs[attribName] = str(value)
parentNode = ET.Element(tagName, attribs)
for readAlignmentCoverage in readAlignmentCoverages:
parentNode.append(readAlignmentCoverage.getXML())
return parentNode
class LocalCoverage(AbstractAnalysis):
"""Calculates coverage, treating alignments as local alignments.
"""
def run(self, globalAlignment=False):
AbstractAnalysis.run(self) #Call base method to do some logging
refSequences = getFastaDictionary(self.referenceFastaFile) #Hash of names to sequences
readSequences = getFastqDictionary(self.readFastqFile) #Hash of names to sequences
sam = pysam.Samfile(self.samFile, "r" )
readsToReadCoverages = {}
for aR in samIterator(sam): #Iterate on the sam lines
refSeq = refSequences[sam.getrname(aR.rname)]
readSeq = readSequences[aR.qname]
readAlignmentCoverageCounter = ReadAlignmentCoverageCounter(aR.qname, readSeq, sam.getrname(aR.rname), refSeq, aR, globalAlignment)
if aR.qname not in readsToReadCoverages:
readsToReadCoverages[aR.qname] = []
readsToReadCoverages[aR.qname].append(readAlignmentCoverageCounter)
sam.close()
#Write out the coverage info for differing subsets of the read alignments
if len(readsToReadCoverages.values()) > 0:
for readCoverages, outputName in [ (reduce(lambda x, y : x + y, readsToReadCoverages.values()), "coverage_all"), (map(lambda x : max(x, key=lambda y : y.readCoverage()), readsToReadCoverages.values()), "coverage_bestPerRead") ]:
parentNode = getAggregateCoverageStats(readCoverages, outputName, refSequences, readSequences, readsToReadCoverages, outputName)
open(os.path.join(self.outputDir, outputName + ".xml"), 'w').write(prettyXml(parentNode))
#this is an ugly file format with each line being a different data type - column length is variable
outf = open(os.path.join(self.outputDir, outputName + ".txt"), "w")
outf.write("MappedReadLengths " + parentNode.get("mappedReadLengths") + "\n")
outf.write("UnmappedReadLengths " + parentNode.get("unmappedReadLengths") + "\n")
outf.write("ReadCoverage " + parentNode.get("distributionreadCoverage") + "\n")
outf.write("MismatchesPerReadBase " + parentNode.get("distributionmismatchesPerReadBase") + "\n")
outf.write("ReadIdentity " + parentNode.get("distributionidentity") + "\n")
outf.write("InsertionsPerBase " + parentNode.get("distributioninsertionsPerReadBase") + "\n")
outf.write("DeletionsPerBase " + parentNode.get("distributiondeletionsPerReadBase") + "\n")
outf.close()
system("Rscript nanopore/analyses/coverage_plot.R {} {}".format(os.path.join(self.outputDir, outputName + ".txt"), os.path.join(self.outputDir, outputName + ".pdf")))
self.finish()
class GlobalCoverage(LocalCoverage):
def run(self):
"""Calculates coverage, treating alignments as global alignments.
"""
LocalCoverage.run(self, globalAlignment=True)
| mitenjain/nanopore | nanopore/analyses/coverage.py | Python | mit | 9,851 | ["pysam"] | 9d4c0de9d4512154c109bc0262f930fc2f472b2c20488b528ed03195cc4e7a6d |
""" unit tests for Transformation Clients
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=protected-access,missing-docstring,invalid-name
import six
import unittest
import json
import mock
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.TransformationSystem.Client.TaskManager import TaskBase
from DIRAC.TransformationSystem.Client.RequestTasks import RequestTasks
from DIRAC.TransformationSystem.Client.Transformation import Transformation
from DIRAC.TransformationSystem.Client.Utilities import PluginUtilities
from DIRAC.TransformationSystem.Client.BodyPlugin.DummyBody import DummyBody
class reqValFake_C(object):
def validate(self, opsInput):
for ops in opsInput:
if not len(ops):
return {"OK": False}
for f in ops:
try:
if not f.LFN:
return {"OK": False}
except Exception:
return {"OK": False}
return {"OK": True}
reqValFake = reqValFake_C()
class ClientsTestCase(unittest.TestCase):
"""Base class for the clients test cases"""
def setUp(self):
self.mockTransClient = mock.MagicMock()
self.mockTransClient.setTaskStatusAndWmsID.return_value = {"OK": True}
self.mockReqClient = mock.MagicMock()
self.taskBase = TaskBase(transClient=self.mockTransClient)
self.pu = PluginUtilities(transClient=self.mockTransClient)
self.requestTasks = RequestTasks(
transClient=self.mockTransClient, requestClient=self.mockReqClient, requestValidator=reqValFake
)
self.transformation = Transformation()
self.maxDiff = None
def tearDown(self):
pass
class TaskBaseSuccess(ClientsTestCase):
def test_updateDBAfterTaskSubmission(self):
res = self.taskBase.updateDBAfterTaskSubmission({})
self.assertEqual(res["OK"], True)
class PluginUtilitiesSuccess(ClientsTestCase):
def test_groupByReplicas(self):
res = self.pu.groupByReplicas(
{
"/this/is/at.1": ["SE1"],
"/this/is/at.12": ["SE1", "SE2"],
"/this/is/at.2": ["SE2"],
"/this/is/at_123": ["SE1", "SE2", "SE3"],
"/this/is/at_23": ["SE2", "SE3"],
"/this/is/at_4": ["SE4"],
},
"Flush",
)
self.assertTrue(res["OK"])
self.assertEqual(
res["Value"],
[
("SE1", ["/this/is/at.1"]),
("SE1,SE2", ["/this/is/at.12"]),
("SE1,SE2,SE3", ["/this/is/at_123"]),
("SE2", ["/this/is/at.2"]),
("SE2,SE3", ["/this/is/at_23"]),
("SE4", ["/this/is/at_4"]),
],
)
res = self.pu.groupByReplicas(
{
"/this/is/at.123": ["SE1", "SE2", "SE3"],
"/this/is/at.12": ["SE1", "SE2"],
"/this/is/at.134": ["SE1", "SE3", "SE4"],
},
"Flush",
)
self.assertTrue(res["OK"])
self.assertEqual(
res["Value"],
[
("SE1,SE2", ["/this/is/at.12"]),
("SE1,SE2,SE3", ["/this/is/at.123"]),
("SE1,SE3,SE4", ["/this/is/at.134"]),
],
)
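# Note (illustrative, derived from the assertions above): groupByReplicas
# groups LFNs by their exact replica SE set; with the "Flush" status every
# group is emitted regardless of its size, ordered by the comma-joined SE key.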
class RequestTasksSuccess(ClientsTestCase):
def test_prepareTransformationTasks(self):
# No tasks in input
taskDict = {}
res = self.requestTasks.prepareTransformationTasks("", taskDict, "owner", "ownerGroup", "/bih/boh/DN")
self.assertTrue(res["OK"])
self.assertEqual(len(taskDict), 0)
# 1 task, which is not OK (it belongs to the second transformation and has no input data)
taskDict = {123: {"TransformationID": 2, "TargetSE": "SE3", "b3": "bb3", "InputData": ""}}
res = self.requestTasks.prepareTransformationTasks("", taskDict, "owner", "ownerGroup", "/bih/boh/DN")
self.assertTrue(res["OK"])
# We should "lose" one of the task in the preparation
self.assertEqual(len(taskDict), 0)
taskDict = {
1: {
"TransformationID": 1,
"TargetSE": "SE1",
"b1": "bb1",
"Site": "MySite",
"InputData": ["/this/is/a1.lfn", "/this/is/a2.lfn"],
},
2: {"TransformationID": 1, "TargetSE": "SE2", "b2": "bb2", "InputData": "/this/is/a1.lfn;/this/is/a2.lfn"},
3: {"TransformationID": 2, "TargetSE": "SE3", "b3": "bb3", "InputData": ""},
}
res = self.requestTasks.prepareTransformationTasks("", taskDict, "owner", "ownerGroup", "/bih/boh/DN")
self.assertTrue(res["OK"])
# We should "lose" one of the task in the preparation
self.assertEqual(len(taskDict), 2)
for task in res["Value"].values():
self.assertTrue(isinstance(task["TaskObject"], Request))
self.assertEqual(task["TaskObject"][0].Type, "ReplicateAndRegister")
try:
self.assertEqual(task["TaskObject"][0][0].LFN, "/this/is/a1.lfn")
except IndexError:
self.assertEqual(task["TaskObject"][0].Status, "Waiting")
try:
self.assertEqual(task["TaskObject"][0][1].LFN, "/this/is/a2.lfn")
except IndexError:
self.assertEqual(task["TaskObject"][0].Status, "Waiting")
# # test another (single) OperationType
res = self.requestTasks.prepareTransformationTasks(
"someType;LogUpload", taskDict, "owner", "ownerGroup", "/bih/boh/DN"
)
self.assertTrue(res["OK"])
# We should "lose" one of the task in the preparation
self.assertEqual(len(taskDict), 2)
for task in res["Value"].values():
self.assertTrue(isinstance(task["TaskObject"], Request))
self.assertEqual(task["TaskObject"][0].Type, "LogUpload")
# ## Multiple operations
transBody = [
("ReplicateAndRegister", {"SourceSE": "FOO-SRM", "TargetSE": "BAR-SRM"}),
("RemoveReplica", {"TargetSE": "FOO-SRM"}),
]
jsonBody = json.dumps(transBody)
taskDict = {
1: {
"TransformationID": 1,
"TargetSE": "SE1",
"b1": "bb1",
"Site": "MySite",
"InputData": ["/this/is/a1.lfn", "/this/is/a2.lfn"],
},
2: {"TransformationID": 1, "TargetSE": "SE2", "b2": "bb2", "InputData": "/this/is/a1.lfn;/this/is/a2.lfn"},
3: {"TransformationID": 2, "TargetSE": "SE3", "b3": "bb3", "InputData": ""},
}
res = self.requestTasks.prepareTransformationTasks(jsonBody, taskDict, "owner", "ownerGroup", "/bih/boh/DN")
self.assertTrue(res["OK"])
# We should "lose" one of the task in the preparation
self.assertEqual(len(taskDict), 2)
for task in res["Value"].values():
self.assertTrue(isinstance(task["TaskObject"], Request))
self.assertEqual(task["TaskObject"][0].Type, "ReplicateAndRegister")
self.assertEqual(task["TaskObject"][1].Type, "RemoveReplica")
try:
self.assertEqual(task["TaskObject"][0][0].LFN, "/this/is/a1.lfn")
self.assertEqual(task["TaskObject"][1][0].LFN, "/this/is/a1.lfn")
except IndexError:
self.assertEqual(task["TaskObject"][0].Status, "Waiting")
self.assertEqual(task["TaskObject"][1].Status, "Waiting")
try:
self.assertEqual(task["TaskObject"][0][1].LFN, "/this/is/a2.lfn")
self.assertEqual(task["TaskObject"][1][1].LFN, "/this/is/a2.lfn")
except IndexError:
self.assertEqual(task["TaskObject"][0].Status, "Waiting")
self.assertEqual(task["TaskObject"][1].Status, "Waiting")
self.assertEqual(task["TaskObject"][0].SourceSE, "FOO-SRM")
self.assertEqual(task["TaskObject"][0].TargetSE, "BAR-SRM")
self.assertEqual(task["TaskObject"][1].TargetSE, "FOO-SRM")
class TransformationSuccess(ClientsTestCase):
def test_setGet(self):
res = self.transformation.setTransformationName("TestTName")
self.assertTrue(res["OK"])
description = "Test transformation description"
res = self.transformation.setDescription(description)
longDescription = "Test transformation long description"
res = self.transformation.setLongDescription(longDescription)
self.assertTrue(res["OK"])
res = self.transformation.setType("MCSimulation")
self.assertTrue(res["OK"])
res = self.transformation.setPlugin("aPlugin")
self.assertTrue(res["OK"])
# # Test DataOperation Body
res = self.transformation.setBody("")
self.assertTrue(res["OK"])
self.assertEqual(self.transformation.paramValues["Body"], "")
res = self.transformation.setBody("_requestType;RemoveReplica")
self.assertTrue(res["OK"])
self.assertEqual(self.transformation.paramValues["Body"], "_requestType;RemoveReplica")
# #Json will turn tuples to lists and strings to unicode
transBody = [
[u"ReplicateAndRegister", {u"SourceSE": u"FOO-SRM", u"TargetSE": u"BAR-SRM"}],
[u"RemoveReplica", {u"TargetSE": u"FOO-SRM"}],
]
res = self.transformation.setBody(transBody)
self.assertTrue(res["OK"])
self.assertEqual(self.transformation.paramValues["Body"], json.dumps(transBody))
# # This is not true if any of the keys or values are not strings, e.g., integers
self.assertEqual(json.loads(self.transformation.paramValues["Body"]), transBody)
with self.assertRaisesRegex(TypeError, "Expected list"):
self.transformation.setBody({"ReplicateAndRegister": {"foo": "bar"}})
with self.assertRaisesRegex(TypeError, "Expected tuple"):
self.transformation.setBody(["ReplicateAndRegister", "RemoveReplica"])
with self.assertRaisesRegex(TypeError, "Expected 2-tuple"):
self.transformation.setBody([("ReplicateAndRegister", "RemoveReplica", "LogUpload")])
with self.assertRaisesRegex(TypeError, "Expected string"):
self.transformation.setBody([(123, "Parameter:Value")])
with self.assertRaisesRegex(TypeError, "Expected dictionary"):
self.transformation.setBody([("ReplicateAndRegister", "parameter=foo")])
with self.assertRaisesRegex(TypeError, "Expected string"):
self.transformation.setBody([("ReplicateAndRegister", {123: "foo"})])
with self.assertRaisesRegex(ValueError, "Unknown attribute"):
self.transformation.setBody([("ReplicateAndRegister", {"Request": Request()})])
with self.assertRaisesRegex(TypeError, "Cannot encode"):
self.transformation.setBody([("ReplicateAndRegister", {"Arguments": Request()})])
# Check that all tuples are checked by passing first a valid one,
# then a faulty one.
# It is enough to check one case, unlike above
with self.assertRaisesRegex(TypeError, "Expected 2-tuple"):
self.transformation.setBody([(u"RemoveReplica", {u"TargetSE": u"FOO-SRM"}), ("One", "too long", "tuple")])
# Test setting a body plugin as body
complexBody = DummyBody()
self.transformation.setBody(complexBody)
def test_SetGetReset(self):
"""Testing of the set, get and reset methods.
set*()
get*()
setTargetSE()
setSourceSE()
getTargetSE()
getSourceSE()
reset()
Ensures that after a reset all parameters are returned to their defaults
"""
res = self.transformation.getParameters()
self.assertTrue(res["OK"])
defaultParams = res["Value"].copy()
for parameterName, defaultValue in res["Value"].items():
if isinstance(defaultValue, six.string_types):
testValue = "TestValue"
else:
testValue = 99999
# # set*
setterName = "set%s" % parameterName
self.assertTrue(hasattr(self.transformation, setterName))
setter = getattr(self.transformation, setterName)
self.assertTrue(callable(setter))
res = setter(testValue)
self.assertTrue(res["OK"])
# # get*
getterName = "get%s" % parameterName
self.assertTrue(hasattr(self.transformation, getterName))
getter = getattr(self.transformation, getterName)
self.assertTrue(callable(getter))
res = getter()
self.assertTrue(res["OK"])
self.assertTrue(res["Value"], testValue)
res = self.transformation.reset()
self.assertTrue(res["OK"])
res = self.transformation.getParameters()
self.assertTrue(res["OK"])
for parameterName, resetValue in res["Value"].items():
self.assertEqual(resetValue, defaultParams[parameterName])
self.assertRaises(AttributeError, self.transformation.getTargetSE)
self.assertRaises(AttributeError, self.transformation.getSourceSE)
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(ClientsTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TaskBaseSuccess))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(PluginUtilitiesSuccess))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(RequestTasksSuccess))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TransformationSuccess))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
ic-hep/DIRAC
|
src/DIRAC/TransformationSystem/Client/test/Test_Client_TransformationSystem.py
|
Python
|
gpl-3.0
| 13,918
|
[
"DIRAC"
] |
c4fde540d2f8a2ba7027d29392d9220d3f33ccb79d72d29a2853991e16d4195e
|
#
# $Revision: 738 $
# $Date: 2009-02-12 15:32:12 -0700 (Thu, 12 Feb 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/versions.py $
def get_svn_codec_version():
SVN_GLUE_VERSION="$Revision: 738 $"
justTheNumber=SVN_GLUE_VERSION[11:len(SVN_GLUE_VERSION)-2]
return justTheNumber
def get_codec_version():
return "2.02"
|
okkhoy/rlglue-python3-codec
|
src/rlglue/versions.py
|
Python
|
apache-2.0
| 422
|
[
"Brian"
] |
d2f25a9e305d7aef1fa9488b183c4b13d639dd3540d331b927c5b59ff467d090
|
"""
utilities
=========
Collection of classes and functions for components and services of ApBsInT:
- Potential manager
- Model (PM and B factor)
- Representation
"""
import numpy as np
import scipy.sparse as ssp
import scipy.linalg as sla
import numbers
import time # For profiling
import apbsint.helpers as helpers
import apbsint.coup_fact as cf
import apbsint.eptools_ext as epx
import apbsint.ptannotate_ext as pta
__all__ = ['ElemPotManager', 'PotManager', 'Model', 'ModelCoupled',
'ModelFactorized', 'Representation', 'RepresentationCoupled',
'RepresentationFactorized']
# Potential manager classes
class ElemPotManager:
"""
ElemPotManager
==============
Elementary component type for PotManager, collects potential name,
number of potentials and potential parameters.
A potential can be annotated by an object of type
pta.PotentialAnnotation. The default is None (no annotation).
Attributes should be changed only via access methods: we maintain
an up_date flag, based on which the internal representation is
recomputed in PotManager. Otherwise, attributes are not controlled
here, but only in PotManager.
"""
def __init__(self,name,size,pars,annobj=None):
self.setname(name,False)
self.setsize(size,False)
self.setpars(pars,False)
self.setannobj(annobj,False)
self.up_date = False
def setname(self,name,chk=True):
if not isinstance(name,str):
raise TypeError('NAME must be string')
if chk:
self.up_date = self.up_date and (name == self.name)
self.name = name
def setsize(self,size,chk=True):
if not isinstance(size,numbers.Integral) or size<1:
raise TypeError('SIZE must be positive integer')
if chk:
self.up_date = self.up_date and (size == self.size)
self.size = size
def setpars(self,pars,chk=True):
if not isinstance(pars,tuple):
pars = (pars,)
# Must be scalar (float), 1D np.ndarray, or tuple of such entries
for el in pars:
if not (isinstance(el,float) or
(isinstance(el,np.ndarray) and el.ndim == 1)):
raise TypeError('PARS must be scalar, 1D numpy.ndarray, or tuple thereof')
if chk:
# Can't really check this, so assume it has changed
self.up_date = False
self.pars = pars
def setannobj(self,annobj,chk=True):
if not (annobj is None or isinstance(annobj,pta.PotentialAnnotation)):
raise TypeError('ANNOBJ must be apbsint.ptannotate_ext.PotentialAnnotation')
if chk:
self.up_date = self.up_date and (annobj == self.annobj)
self.annobj = annobj
# Mechanism to check whether internal representation has to be recomputed:
# - Recompute (in 'check_internal') if the up_date field of any ElemPotManager
# object is False
# - Set all up_date fields to False upon construction here
# - Set all up_date fields to True in 'check_internal'
class PotManager:
"""
PotManager
==========
A potential manager consists of one or more (tuple) ElemPotManager
objects, which are stacked. It also maintains an internal representation.
Call 'check_internal' before accessing the internal representation: it is
recomputed whenever any of the ElemPotManager objects has changed.
"""
def __init__(self,elem):
if isinstance(elem,list):
raise TypeError('ELEM must be tuple, not list')
if not isinstance(elem,tuple):
elem = (elem,)
for el in elem:
if not isinstance(el,ElemPotManager):
raise TypeError('ELEM must be tuple of ElemPotManager')
self.size = 0
for el in elem:
el.up_date = False
self.size += el.size
self.elem = elem
# Internal representation consists of 'potids', 'numpot', 'parvec',
# 'parshrd', 'annobj': all contiguous 1D np.ndarray, dtype np.int32
# except 'parvec': np.float64, 'annobj': np.uint64.
# 'annobj' stores a void* to the annotation object ('getptr' method),
# or 0 if none.
# See src/eptools/potentials/PotManagerFactory.h or documentation for
# details.
# Also, we compute 'updind' as index of all non-Gaussian potentials
# (type other than 'Gaussian').
def check_internal(self):
elem = self.elem
do_recomp = False
for el in elem:
if not el.up_date:
do_recomp = True
break
if do_recomp:
# Loop 1: Everything except PARVEC, determine size
nb = len(elem)
self.potids = np.empty(nb,dtype=np.int32)
self.numpot = np.empty(nb,dtype=np.int32)
self.annobj = np.zeros(nb,dtype=np.uint64)
parshrd = []
pvsz = 0 # Size of PARVEC
updind = []
pid_gauss = epx.getpotid('Gaussian')
off = 0
for k in xrange(nb):
pid = epx.getpotid(elem[k].name)
if pid == -1:
raise ValueError("Block {0}: Unknown potential name '{1}'".format(k,elem[k].name))
self.potids[k] = pid
numk = elem[k].size
self.numpot[k] = numk
if elem[k].annobj is not None:
self.annobj[k] = elem[k].annobj.getptr()
pars = elem[k].pars
nump = len(pars)
for p in xrange(nump):
if isinstance(pars[p],float):
parshrd.append(1)
sz = 1
else:
sz = len(pars[p])
if sz == 1:
parshrd.append(1)
else:
parshrd.append(0)
if sz != numk:
raise ValueError('Block {0}, pars[{1}]: Wrong size'.format(k,p))
pvsz += sz
if pid != pid_gauss:
updind.extend(range(off,off+numk))
off += numk
self.parshrd = np.array(parshrd,dtype=np.int32)
self.updind = np.array(updind,dtype=np.int32)
# Loop 2: Assemble PARVEC
self.parvec = np.empty(pvsz,dtype=np.float64)
off = 0
for k in xrange(nb):
pars = elem[k].pars
nump = len(pars)
for p in xrange(nump):
sz = 1 if isinstance(pars[p],float) else len(pars[p])
self.parvec[off:off+sz] = pars[p]
off += sz
# Test whether all parameter values are valid
msg = epx.potmanager_isvalid(self.potids,self.numpot,self.parvec,
self.parshrd,self.annobj)
if len(msg)>0:
raise ValueError(msg)
# Set all up_date flags
for el in elem:
el.up_date = True
def filterpots(self,nameset):
"""
Return index of potential positions corresponding to those with
type names in 'nameset' (set of str)
"""
if not isinstance(nameset,set):
raise TypeError('NAMESET must be set of strings')
off = 0
res = []
for el in self.elem:
numk = el.size
if el.name in nameset:
res.extend(range(off,off+numk))
off += numk
return np.array(res,dtype=np.int32)
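# Hedged usage sketch (illustration only, not part of the original module):
# shows how the up_date mechanism described above drives recomputation of
# the internal representation ('Gaussian' is a registered potential name,
# as used in the test code at the bottom of this file).
def _example_potman_recompute():
    pelem = ElemPotManager('Gaussian', 10, (0., 1.))
    pman = PotManager((pelem,))
    pman.check_internal()    # first call: builds potids/numpot/parvec/parshrd
    pelem.setpars((1., 2.))  # flags the block as stale (up_date = False)
    pman.check_internal()    # triggers a full recompute of the representation
    return pman.parvec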
class Model:
"""
Model
=====
Collects B coupling factor and potential manager (type apbsint.PotManager).
"""
def __init__(self,bfact,potman):
if not isinstance(potman,PotManager):
raise TypeError('POTMAN must be instance of apbsint.PotManager')
self.bfact = bfact
self.potman = potman
class ModelCoupled(Model):
"""
ModelCoupled
============
Model for coupled mode. The B factor must be of type apbsint.Mat.
"""
def __init__(self,bfact,potman):
if not isinstance(bfact,cf.Mat):
raise TypeError('BFACT must be instance of apbsint.Mat')
if bfact.transp:
raise TypeError('BFACT must not be transposed')
Model.__init__(self,bfact,potman)
if bfact.shape(0) != potman.size:
raise TypeError('BFACT, POTMAN must have same size')
class ModelFactorized(Model):
"""
ModelFactorized
===============
Model for factorized mode. The B factor must be of type
apbsint.MatFactorizedInf.
"""
def __init__(self,bfact,potman):
if not isinstance(bfact,cf.MatFactorizedInf):
raise TypeError('BFACT must be instance of apbsint.MatFactorizedInf')
Model.__init__(self,bfact,potman)
if bfact.shape(0) != potman.size:
raise TypeError('BFACT, POTMAN must have same size')
# Representation classes (coupled mode for now)
class Representation:
"""
Representation
==============
Base class for EP posterior representations. The EP (message)
parameters are also maintained here.
"""
def __init__(self,bfact,ep_pi=None,ep_beta=None):
self.ep_pi = None
if ep_pi is not None:
self.setpi(ep_pi)
self.ep_beta = None
if ep_beta is not None:
self.setbeta(ep_beta)
self.bfact = bfact
def setpi(self,ep_pi):
sz = self.size_pars()
if not helpers.check_vecsize(ep_pi,sz):
raise TypeError('EP_PI must be vector of size {0}'.format(sz))
if self.ep_pi is None:
self.ep_pi = np.empty(sz)
self.ep_pi[:] = ep_pi
def setbeta(self,ep_beta):
sz = self.size_pars()
if not helpers.check_vecsize(ep_beta,sz):
raise TypeError('EP_BETA must be vector of size {0}'.format(sz))
if self.ep_beta is None:
self.ep_beta = np.empty(sz)
self.ep_beta[:] = ep_beta
# Internal methods
def size_pars(self):
"""
Returns size of EP parameter vectors ep_pi, ep_beta
"""
raise NotImplementedError('SIZE_PARS must be implemented')
class RepresentationCoupled(Representation):
"""
RepresentationCoupled
=====================
EP posterior representation in coupled mode. Used for parallel and
sequential updating EP. We maintain the Cholesky factor L of the
posterior inverse covariance matrix A: A = L L^T, as well as
c = L^-1 B^T beta,
B the factor given in 'bfact' (must be type 'Mat'). If 'keep_margs'
is True, we also keep marginal moments in 'marg_means', 'marg_vars'.
"""
def __init__(self,bfact,ep_pi=None,ep_beta=None,keep_margs=False):
if not isinstance(bfact,cf.Mat):
raise TypeError('BFACT must be instance of apbsint.Mat')
Representation.__init__(self,bfact,ep_pi,ep_beta)
self.keep_margs = keep_margs
def size_pars(self):
return self.bfact.shape(0)
def refresh(self):
"""
Recompute representation from scratch, given EP parameters and B
coupling factor. If 'keep_margs'==True, the covariance A^-1 is
computed as a byproduct. In this case, A^-1 is kept as attribute
'post_cov'.
ATTENTION: 'post_cov' is valid only directly after a call of
'refresh'. It is not kept up to date, and may even be overwritten
by other methods.
"""
#t_start0=time.time()
bfact = self.bfact
m, n = bfact.shape()
# Cholesky factor L and c vector
# We build the A matrix in 'self.lfact'. 'sla.cholesky' overwrites A
# directly by L.
# NOTE: 'resize' method does not work as documented. Even with no
# references to the object, it raises an error, so have to call with
# 'refcheck=False'
try:
self.lfact.resize((n,n),refcheck=False)
except AttributeError:
self.lfact = np.empty((n,n))
#t_start=time.time()
bfact.mat_btdb(self.ep_pi,self.lfact)
#t_stop=time.time()
#print 'Time(refresh::mat_btdb): %.8fs' % (t_stop-t_start)
self.lfact = sla.cholesky(self.lfact,lower=True,overwrite_a=True)
self.cvec = sla.solve_triangular(self.lfact,bfact.T().mvm(self.ep_beta),
lower=True,trans='N')
if self.keep_margs:
# Recompute marginal moments
self.marg_means = bfact.mvm(sla.solve_triangular(self.lfact,
self.cvec,
lower=True,
trans='T'))
# We need A^-1 for the marginal variances. It is written into
# 'self.post_cov'
try:
self.post_cov.resize((n,n),refcheck=False)
except AttributeError:
self.post_cov = np.empty((n,n))
self._comp_inva(self.post_cov)
#t_start=time.time()
self.marg_vars = bfact.diag_bsbt(self.post_cov)
#t_stop=time.time()
#print 'Time(refresh::diag_bsbt): %.8fs' % (t_stop-t_start)
# Cholesky up/downdate functions require F contiguous
# (do this down here, in case other methods called here prefer C
# contiguous)
if not self.lfact.flags['F_CONTIGUOUS']:
# NOTE: Very silly! 'sla.cholesky' produces C contiguous, so here
# a useless copy is done!
# We could use 'sla.cholesky' with 'lower=false', then assign
# the transpose. This would avoid the copy (dodgy?)
self.lfact = np.asfortranarray(self.lfact)
#t_stop0=time.time()
#print 'Time(refresh(ALL)): %.8fs' % (t_stop0-t_start0)
def update_single(self,j,delpi,delbeta,vvec=None):
"""
Change of EP parameters:
ep_pi[j] += delpi; ep_beta[j] += delbeta
The representation is updated accordingly. In particular, the
Cholesky factor 'lfact' is updated ('delpi'>0) or downdated
('delpi'<0). If 'keep_margs'==True, the marginal moments are
updated as well.
In 'vvec', the vector L^-1 B[j,:] can be passed. If not, it is
recomputed here.
NOTE: 'post_cov' (if given) is not updated!
"""
bfact = self.bfact
m, n = bfact.shape()
if not (isinstance(j,numbers.Integral) and j>=0 and j<m and
isinstance(delpi,numbers.Real) and
isinstance(delbeta,numbers.Real)):
raise ValueError('J, DELPI or DELBETA wrong')
if not (vvec is None or helpers.check_vecsize(vvec,n)):
raise TypeError('VVEC wrong')
# Scratch variables. We keep them as members, to avoid having to
# allocate them in every call
try:
self.cup_c.resize(n,refcheck=False)
self.cup_s.resize(n,refcheck=False)
self.cup_wk.resize(n,refcheck=False)
self.cup_z.resize((1,n),refcheck=False)
self.us_bvec.resize(n,refcheck=False)
if self.keep_margs:
self.us_wvec.resize(m,refcheck=False)
self.us_w2vec.resize(m,refcheck=False)
except AttributeError:
self.cup_c = np.empty(n)
self.cup_s = np.empty(n)
self.cup_wk = np.empty(n)
self.cup_z = np.empty((1,n),order='F')
self.us_bvec = np.empty(n)
if self.keep_margs:
self.us_wvec = np.empty(m)
self.us_w2vec = np.empty(m)
bvec = self.us_bvec
if self.keep_margs:
wvec = self.us_wvec
w2vec = self.us_w2vec
if delpi>0.:
# Cholesky update
tscal = np.sqrt(delpi)
bfact.T().getcol(j,bvec)
if self.keep_margs:
if vvec is None:
# Need 'vvec' below, so compute it here
vvec = sla.solve_triangular(self.lfact,bvec,lower=True,
trans='N')
mu = np.inner(vvec,self.cvec)
rho = np.inner(vvec,vvec)
bvec *= tscal
yscal = np.empty(1)
yscal[0] = delbeta/tscal
self.cup_z[0] = self.cvec
stat = epx.choluprk1(self.lfact,'L',bvec,self.cup_c,self.cup_s,
self.cup_wk,self.cup_z,yscal)
if stat != 0:
raise sla.LinAlgError("Numerical error in 'choluprk1' (external)")
self.cvec[:] = self.cup_z.ravel()
else:
# Cholesky downdate
tscal = np.sqrt(-delpi)
if vvec is None:
bfact.T().getcol(j,bvec)
vvec = sla.solve_triangular(self.lfact,bvec,lower=True,
trans='N')
if self.keep_margs:
mu = np.inner(vvec,self.cvec)
rho = np.inner(vvec,vvec)
yscal = np.empty(1)
yscal[0] = -delbeta/tscal
self.cup_z[0] = self.cvec
bvec[:] = vvec; bvec *= tscal
stat = epx.choldnrk1(self.lfact,'L',bvec,self.cup_c,self.cup_s,
self.cup_wk,self.cup_z,yscal)
if stat != 0:
raise sla.LinAlgError("Numerical error in 'choldnrk1' (external)")
self.cvec[:] = self.cup_z.ravel()
self.ep_pi[j] += delpi
self.ep_beta[j] += delbeta
if self.keep_margs:
# Update marginal moments
assert vvec is not None
bfact.mvm(sla.solve_triangular(self.lfact,vvec,lower=True,
trans='T'),wvec)
tscal = 1./(delpi*rho+1.)
w2vec[:] = wvec; w2vec *= ((delbeta-delpi*mu)*tscal)
self.marg_means += w2vec
wvec *= wvec; wvec *= (delpi*tscal)
self.marg_vars -= wvec
def get_marg(self,j,vvec=None):
"""
Returns (mu, rho), mu marginal mean, rho marginal variance at potential
j (Gaussian marginal, not tilted marginal).
If 'vvec' is given, L^-1 B[j,:] is written there. In this case, the
marginal is always computed from scratch. Otherwise, if
'keep_margs'==True, we use 'marg_XXX'.
If (mu, rho) are computed from scratch and 'keep_margs'==True, the
corr. entries of 'marg_XXX' are refreshed.
"""
bfact = self.bfact
m, n = bfact.shape()
if not (isinstance(j,numbers.Integral) and j>=0 and j<m):
raise ValueError('J wrong')
if vvec is None:
if self.keep_margs:
return (self.marg_means[j], self.marg_vars[j])
vvec = np.empty(n)
else:
if not helpers.check_vecsize(vvec,n):
raise TypeError('VVEC wrong')
try:
self.us_bvec.resize(n,refcheck=False)
except AttributeError:
self.us_bvec = np.empty(n)
bfact.T().getcol(j,self.us_bvec)
vvec[:] = sla.solve_triangular(self.lfact,self.us_bvec,lower=True,
trans='N')
mu = np.inner(vvec,self.cvec)
rho = np.inner(vvec,vvec)
if self.keep_margs:
# Refresh entries
self.marg_means[j] = mu
self.marg_vars[j] = rho
return (mu, rho)
def predict(self,pbfact,pmeans,pvars=None,use_cov=False):
"""
Compute predictive means (and variances, optional), given test set
coupling factor B_p (in 'pbfact').
Predictive variances require A^-1. If 'post_cov' is defined and
'use_cov'==True, A^-1 is taken from 'post_cov'. Otherwise, it is
computed here (and written into 'post_cov').
NOTE: Use 'use_cov'=True if 'refresh' with 'keep_margs'=True has
been called just before.
"""
if not isinstance(pbfact,cf.Mat):
raise TypeError('PBFACT must be instance of apbsint.Mat')
pm, n = pbfact.shape()
if n != self.bfact.shape(1):
raise TypeError('PBFACT has wrong size')
if not (helpers.check_vecsize(pmeans,pm) and
(pvars is None or helpers.check_vecsize(pvars,pm))):
raise TypeError('PMEANS or PVARS wrong')
# Predictive means
pbfact.mvm(sla.solve_triangular(self.lfact,self.cvec,lower=True,
trans='T'),pmeans)
if pvars is not None:
# Predictive variances: Need inverse A^-1
try:
if self.post_cov.shape != (n,n):
raise TypeError('Internal error: POST_COV attribute has wrong size')
except AttributeError:
if use_cov:
raise ValueError('POST_COV is not defined')
self.post_cov = np.empty((n,n))
amat = self.post_cov
if not use_cov:
self._comp_inva(amat)
pbfact.diag_bsbt(amat,pvars)
# Internal methods
def _comp_inva(self,amat):
"""
Compute inverse of A and write into 'amat' (must be right size and
C contiguous). The Cholesky factor of A is in 'lfact'.
"""
# Set 'amat' to identity matrix
amat.fill(0.)
np.fill_diagonal(amat,1.)
#if not (amat.flags['C_CONTIGUOUS'] or amat.flags['F_CONTIGUOUS']):
# raise TypeError('Internal error: AMAT should be contiguous!')
#aflat = amat.ravel()
#if aflat.flags['OWNDATA']:
# raise TypeError('Internal error: Need view here!')
#aflat.fill(0.)
#n = amat.shape[0]
#aflat[0::n+1] = 1.
amat[:] = sla.cho_solve((self.lfact,True),amat,overwrite_b=True)
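# Hedged consistency check (illustration only; assumes 'rep' is a valid
# RepresentationCoupled with initialized ep_pi/ep_beta and 0 <= j < m): a
# rank-1 update via 'update_single' should agree with a full 'refresh'.
def _example_check_update_single(rep, j, delpi=0.5, delbeta=0.1):
    rep.update_single(j, delpi, delbeta)
    mu1, rho1 = rep.get_marg(j)
    rep.refresh()   # recompute Cholesky factor and c vector from scratch
    mu2, rho2 = rep.get_marg(j)
    assert abs(mu1 - mu2) < 1e-6 and abs(rho1 - rho2) < 1e-6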
class RepresentationFactorized(Representation):
"""
RepresentationFactorized
========================
EP posterior representation in factorized mode. B (in 'bfact') is a
sparse matrix (see apbsint.MatFactorizedInf).
'ep_pi', 'ep_beta' are the message parameters (flat, size 'bfact.nnz()').
'marg_pi', 'marg_beta' are the marginals (natural parameters).
Selective damping is supported if 'sd_numk' is given. The SD
representation tracks max_k pi_{k,i} for each variable, it is
initialized/recomputed by 'seldamp_reset'.
"""
def __init__(self,bfact,ep_pi=None,ep_beta=None):
if not isinstance(bfact,cf.MatFactorizedInf):
raise TypeError('BFACT must be apbsint.MatFactorizedInf')
Representation.__init__(self,bfact,ep_pi,ep_beta)
def size_pars(self):
return self.bfact.nnz()
def refresh(self):
"""
Recomputes marginals 'marg_pi', 'marg_beta' from message parameters
'ep_pi', 'ep_beta'.
"""
bf = self.bfact
m, n = bf.shape()
if self.ep_pi is None or self.ep_beta is None:
raise ValueError('EP parameters must be initialized')
try:
self.marg_pi.resize(n,refcheck=False)
self.marg_beta.resize(n,refcheck=False)
except AttributeError:
self.marg_pi = np.empty(n)
self.marg_beta = np.empty(n)
epx.fact_compmarginals(n,m,bf.rowind,bf.colind,bf.bvals,self.ep_pi,
self.ep_beta,self.marg_pi,self.marg_beta)
def predict(self,pbfact,pmeans,pvars=None):
if not isinstance(pbfact,cf.MatFactorizedInf):
raise TypeError('PBFACT must be apbsint.MatFactorizedInf')
pm, n = pbfact.shape()
if n != self.bfact.shape(1):
raise TypeError('PBFACT has wrong size')
if not (helpers.check_vecsize(pmeans,pm) and
(pvars is None or helpers.check_vecsize(pvars,pm))):
raise TypeError('PMEANS or PVARS wrong')
tvec = 1./self.marg_pi
if pvars is not None:
# 'pbfact.b2fact' is B_test**2
pvars[:] = pbfact.b2fact.dot(tvec)
tvec *= self.marg_beta
if pmeans.flags['C_CONTIGUOUS']:
pbfact.mvm(tvec,pmeans)
else:
pmeans[:] = pbfact.mvm(tvec)
def seldamp_reset(self,numk,subind=None,subexcl=False):
"""
Initializes or resets the selective damping (SD) representation. SD
ensures that cavity marginals are well-defined after each EP update.
The SD representation tracks max_k pi_{k,i} for each variable, by
storing the 'numk' largest pi values for each i. The larger 'numk',
the less often maxima have to be recomputed.
If 'subind' is given, max_k runs only over this index (if
'subexcl'==False) or over its complement (if 'subexcl'==True).
"""
bf = self.bfact
m, n = bf.shape()
if not isinstance(numk,numbers.Integral) or numk<2:
raise TypeError('NUMK must be integer > 1')
if not (subind is None or
(helpers.check_vecsize(subind) and subind.dtype == np.int32)):
raise TypeError('SUBIND must be numpy.ndarray with dtype numpy.int32')
(self.sd_numvalid, self.sd_topind, self.sd_topval) \
= epx.fact_compmaxpi(n,m,bf.rowind,bf.colind,bf.bvals,self.ep_pi,
self.ep_beta,numk,subind,subexcl)
self.sd_subind = subind
self.sd_subexcl = subexcl
self.sd_numk = numk
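# Hedged usage sketch (illustration only; assumes 'bmat' is a valid
# apbsint.MatFactorizedInf with one message slot per non-zero of B):
def _example_factorized_refresh(bmat):
    rep = RepresentationFactorized(bmat, ep_pi=np.ones(bmat.nnz()),
                                   ep_beta=np.zeros(bmat.nnz()))
    rep.refresh()   # accumulates messages into marg_pi, marg_beta
    return rep.marg_pi, rep.marg_beta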
# Testcode (really basic)
if __name__ == "__main__":
pelem1 = ElemPotManager('Laplace',100,(0., 1.2))
pelem2 = ElemPotManager('Gaussian',200,(np.random.randn(200), 1.5))
pelem3 = ElemPotManager('Probit',7,(np.array([1.,-1.,1.,1.,1.,1.,-1.]),
0.))
pman = PotManager((pelem1, pelem2, pelem3))
pman.check_internal()
assert np.array_equal(pman.updind,
np.array(range(100) + range(300,307), dtype=np.int32)), \
'PotManager.check_internal: updind is wrong'
print('PotManager.check_internal seems OK')
# TODO: Test code for RepresentationCoupled!
|
mseeger/apbsint
|
python/apbsint/utilities.py
|
Python
|
bsd-3-clause
| 26,130
|
[
"Gaussian"
] |
dfac4bd8ce06cb63d650c43497bc20a23a54c51f9f27d1697731b8c090971067
|
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
def main():
myscreen = camvtk.VTKScreen()
focal = cam.Point(5, 5, 0)
r = 30
theta = (float(45)/360)*2*math.pi
fi = (float(45)/360)*2*math.pi # degrees -> radians, matching theta above
campos = cam.Point( r*math.sin(theta)*math.cos(fi), r*math.sin(theta)*math.sin(fi), r*math.cos(theta) )
myscreen.camera.SetPosition(campos.x, campos.y, campos.z)
myscreen.camera.SetFocalPoint(focal.x,focal.y, focal.z)
t = camvtk.Text()
t.SetPos( (myscreen.width-450, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
ytext = "kd-tree debug" #"Y: %3.3f" % (ycoord)
t2.SetText(ytext)
t2.SetPos( (50, myscreen.height-50) )
myscreen.addActor( t2)
#w2if = vtk.vtkWindowToImageFilter()
#w2if.SetInput(myscreen.renWin)
#lwr = vtk.vtkPNGWriter()
#lwr.SetInput( w2if.GetOutput() )
t.SetText("OpenCAMLib 10.03-beta, " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
#ycoord = 1.1
stl = camvtk.STLSurf(filename="../stl/demo.stl")
#stl = camvtk.STLSurf(filename="../stl/demo2.stl")
print("STL surface read")
#myscreen.addActor(stl)
#stl.SetWireframe()
#stl.SetColor((0.5,0.5,0.5))
polydata = stl.src.GetOutput()
s= cam.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print("STLSurf with ", s.size(), " triangles")
myscreen.addActor( camvtk.Sphere( center=(0,0,0), radius=0.2, color = camvtk.yellow ) )
s.build_kdtree()
print("built kd-tree")
s.jump_kd_reset()
tlist = s.get_kd_triangles()
print("got", len(tlist), " triangles")
while (s.jump_kd_hi()):
lotris = s.get_kd_triangles()
s.jump_kd_up()
cut = s.get_kd_cut()
s.jump_kd_lo()
hitris = s.get_kd_triangles()
lev = s.get_kd_level()
print("l=", lev, " hi=", len(hitris), " lo=", len(lotris), " cut=", cut)
if ( cut[0] < 2 ):
print("x cut ",)
if ( cut[0] == 0):
print("max" )
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.green ) )
else:
print("min" )
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.lgreen ) )
#myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.red ) )
else:
print("y cut ",)
if ( cut[0] == 2):
print("max" )
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.red ) )
else:
print("min")
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.pink ) )
slo = camvtk.STLSurf(triangleList=lotris)
slo.SetColor(camvtk.pink)
slo.SetWireframe()
shi = camvtk.STLSurf(triangleList=hitris)
shi.SetColor(camvtk.lgreen)
shi.SetWireframe()
myscreen.addActor(slo)
myscreen.addActor(shi)
myscreen.render()
myscreen.iren.Start()
input("Press Enter to terminate")
time.sleep(1)
myscreen.removeActor(slo)
myscreen.removeActor(shi)
print("done.")
myscreen.render()
#lwr.SetFileName(filename)
#raw_input("Press Enter to terminate")
time.sleep(0.2)
#lwr.Write()
myscreen.iren.Start()
if __name__ == "__main__":
main()
#raw_input("Press Enter to terminate")
|
aewallin/opencamlib
|
examples/python/kdtree_debug_0.py
|
Python
|
lgpl-2.1
| 3,657
|
[
"VTK"
] |
147bb08c9d36afdb10021b79894a47ad5856f3b7782e7a5c474108c125c779cc
|
import numpy as np
import rdkit.Chem.Descriptors as Descriptors
import rdkit.Chem.rdMolDescriptors as rdMolDescriptors
import rdkit.Chem.EState as EState
import rdkit.Chem.rdPartialCharges as rdPartialCharges
import rdkit.Chem.rdChemReactions as rdRxns
att_dtype = np.float32
def oneHotVector(val, lst):
'''Converts a value to a one-hot vector based on options in lst'''
if val not in lst:
val = lst[-1]
return list(map(lambda x: x == val, lst))  # list() keeps this working on Python 3 as well
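# Hedged usage note (illustration only): the last entry of 'lst' acts as the
# catch-all category, e.g.
#   oneHotVector(6, [5, 6, 7, 999])  -> [False, True, False, False]
#   oneHotVector(14, [5, 6, 7, 999]) -> [False, False, False, True]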
def rxn_level_descriptors(rxn):
'''
Given an RDKit reaction, returns a reaction fingerprint as a numpy array
**for very basic testing**
'''
settings = rdRxns.ReactionFingerprintParams()
return np.array(rdRxns.CreateStructuralFingerprintForReaction(rxn, settings))
def mol_level_descriptors(mol):
'''
Given an RDKit mol, returns a list of molecule-level descriptors
and their names
returns: (labels, attributes)
'''
labels = [label for (label, f) in Descriptors._descList]
attributes = [f(mol) for (label, f) in Descriptors._descList]
return (labels, attributes)
def atom_level_descriptors(mol, include = ['functional'], asOneHot = False, ORIGINAL_VERSION = False):
'''
Given an RDKit mol, returns an N_atom-long list of lists,
each of which contains atom-level descriptors and their names
returns: (labels, attributes)
'''
attributes = [[] for i in mol.GetAtoms()]
labels = []
if 'functional' in include:
[attributes[i].append(x[0]) \
for (i, x) in enumerate(rdMolDescriptors._CalcCrippenContribs(mol))]
labels.append('Crippen contribution to logp')
[attributes[i].append(x[1]) \
for (i, x) in enumerate(rdMolDescriptors._CalcCrippenContribs(mol))]
labels.append('Crippen contribution to mr')
[attributes[i].append(x) \
for (i, x) in enumerate(rdMolDescriptors._CalcTPSAContribs(mol))]
labels.append('TPSA contribution')
[attributes[i].append(x) \
for (i, x) in enumerate(rdMolDescriptors._CalcLabuteASAContribs(mol)[0])]
labels.append('Labute ASA contribution')
[attributes[i].append(x) \
for (i, x) in enumerate(EState.EStateIndices(mol))]
labels.append('EState Index')
rdPartialCharges.ComputeGasteigerCharges(mol)
[attributes[i].append(float(a.GetProp('_GasteigerCharge'))) \
for (i, a) in enumerate(mol.GetAtoms())]
labels.append('Gasteiger partial charge')
# Gasteiger partial charges sometimes gives NaN
for i in range(len(attributes)):
if np.isnan(attributes[i][-1]):
attributes[i][-1] = 0.0
[attributes[i].append(float(a.GetProp('_GasteigerHCharge'))) \
for (i, a) in enumerate(mol.GetAtoms())]
labels.append('Gasteiger hydrogen partial charge')
# Gasteiger partial charges sometimes gives NaN
for i in range(len(attributes)):
if np.isnan(attributes[i][-1]):
attributes[i][-1] = 0.0
if 'structural' in include:
[attributes[i].extend(atom_structural(mol.GetAtomWithIdx(i), asOneHot = asOneHot, ORIGINAL_VERSION = ORIGINAL_VERSION)) \
for i in range(len(attributes))]
labels.append('--many structural--')
return (labels, attributes)
def bond_structural(bond, asOneHot = False, extraOne = False):
'''
Returns a numpy array of attributes for an RDKit bond
- Bond type as double
- If bond is aromatic
- If bond is conjugated
- If bond is in ring
'''
# Redefine oneHotVector function
if not asOneHot:
oneHotVectorFunc = lambda x: x[0]
else:
oneHotVectorFunc = oneHotVector
# Initialize
attributes = []
# Add bond type
attributes += oneHotVectorFunc(
bond.GetBondTypeAsDouble(),
[1.0, 1.5, 2.0, 3.0]
)
# Add if is aromatic
attributes.append(bond.GetIsAromatic())
# Add if bond is conjugated
attributes.append(bond.GetIsConjugated())
# Add if bond is part of ring
attributes.append(bond.IsInRing())
# NEED THIS FOR TENSOR REPRESENTATION - 1 IF THERE IS A BOND
if extraOne: attributes.append(1)
return np.array(attributes, dtype = att_dtype)
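# Descriptive note (derived from the code above): with asOneHot=True and
# extraOne=False, bond_structural yields 7 entries (4 one-hot bond-type
# slots plus the aromatic/conjugated/in-ring flags); extraOne=True appends
# a constant 1, giving 8. With asOneHot=False the bond type is a single
# scalar, giving 4 (or 5) entries.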
def atom_structural(atom, asOneHot = False, ORIGINAL_VERSION = False):
'''
Returns a numpy array of attributes for an RDKit atom
- atomic number
- number of heavy neighbors
- total number of hydrogen neighbors
- formal charge
- is in a ring
- is aromatic
'''
# Redefine oneHotVector function
if not asOneHot:
oneHotVectorFunc = lambda x: x[0]
else:
oneHotVectorFunc = oneHotVector
# Initialize
attributes = []
if ORIGINAL_VERSION: # F_atom = 32 mode
attributes += oneHotVectorFunc(
atom.GetAtomicNum(),
[5, 6, 7, 8, 9, 15, 16, 17, 35, 53, 999]
)
attributes += oneHotVectorFunc(
len(atom.GetNeighbors()),
[0, 1, 2, 3, 4, 5]
)
attributes += oneHotVectorFunc(
atom.GetTotalNumHs(),
[0, 1, 2, 3, 4]
)
attributes.append(atom.GetFormalCharge())
attributes.append(atom.IsInRing())
attributes.append(atom.GetIsAromatic())
return np.array(attributes, dtype = att_dtype)
# Add atomic number (todo: finish)
attributes += oneHotVectorFunc(
atom.GetAtomicNum(),
[3, 5, 6, 7, 8, 9, 11, 12, 14, 15, 16, 17, 35, 53, 999]
)
# Add heavy neighbor count
attributes += oneHotVectorFunc(
len(atom.GetNeighbors()),
[0, 1, 2, 3, 4, 5]
)
# Add hydrogen count
attributes += oneHotVectorFunc(
atom.GetTotalNumHs(),
[0, 1, 2, 3, 4]
)
# Add formal charge
attributes.append(atom.GetFormalCharge())
# Add boolean if in ring
attributes.append(atom.IsInRing())
# Add boolean if aromatic atom
attributes.append(atom.GetIsAromatic())
# Adjacent to aromatic ring but not aromatic itself
attributes.append(atom.GetIsAromatic() == False and any([neighbor.GetIsAromatic() for neighbor in atom.GetNeighbors()]))
# Halogen
attributes.append(atom.GetAtomicNum() in [9, 17, 35, 53, 85, 117])
# Chalcogen
attributes.append(atom.GetAtomicNum() in [8, 16, 34, 52, 84, 116])
# Pnictogens
attributes.append(atom.GetAtomicNum() in [7, 15, 33, 51, 83])
# Alkali
attributes.append(atom.GetAtomicNum() in [3, 11, 19, 37, 55, 87])
# Alkaline earth
attributes.append(atom.GetAtomicNum() in [4, 12, 20, 38, 56, 88])
# Common metals
attributes.append(atom.GetAtomicNum() in [13, 22, 24, 25, 26, 27, 28, 29, 30, 33, 42, 44, 45, 46, 47, 48, 49, 50, 78, 80, 82])
return np.array(attributes, dtype = att_dtype)
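# Descriptive note (derived from the code above): with asOneHot=True the
# ORIGINAL_VERSION branch yields 25 entries (11 + 6 + 5 one-hot slots plus
# charge/ring/aromatic), while the full branch yields 36 (15 + 6 + 5 slots,
# charge/ring/aromatic, and the 7 element-class flags). The "F_atom = 32"
# in the comment above presumably counts atom plus bond features together.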
def edits_to_vectors(edits, mol, atom_desc_dict = {}, return_atom_desc_dict = False, ORIGINAL_VERSION = False, include = ['functional', 'structural']):
'''
Given a set of edits (h_lost, h_gain, bond_lost, and bond_gain) from summarize_reaction_outcome,
this function returns a set of vectors describing those edits.
'''
if not atom_desc_dict:
atom_descriptors = atom_level_descriptors(mol, include = include, asOneHot = True, ORIGINAL_VERSION = ORIGINAL_VERSION)[1]
atom_desc_dict = {a.GetProp('molAtomMapNumber'): atom_descriptors[i] for (i, a) in enumerate(mol.GetAtoms()) if a.HasProp('molAtomMapNumber')}
if return_atom_desc_dict:
return atom_desc_dict
# h_lost, h_gain, bond_lost, bond_gain = edits
return (
[atom_desc_dict[molAtomMapNumber] for molAtomMapNumber in edits[0]],
[atom_desc_dict[molAtomMapNumber] for molAtomMapNumber in edits[1]],
[
atom_desc_dict[molAtomMapNumber1] +
oneHotVector(bondOrder, [1.0, 1.5, 2.0, 3.0]) +
atom_desc_dict[molAtomMapNumber2] \
for (molAtomMapNumber1, molAtomMapNumber2, bondOrder) in edits[2]
],
[
atom_desc_dict[molAtomMapNumber1] +
oneHotVector(bondOrder, [1.0, 1.5, 2.0, 3.0]) +
atom_desc_dict[molAtomMapNumber2] \
for (molAtomMapNumber1, molAtomMapNumber2, bondOrder) in edits[3]
]
)
|
connorcoley/ochem_predict_nn
|
utils/descriptors.py
|
Python
|
mit
| 7,409
|
[
"RDKit"
] |
30bb6fd608b7a537ffec3460f9dcf75a83033e9b6e68fe15940d42eb9dbc607a
|
# Storage filtering classes
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from collections import namedtuple
from blivet import arch
from blivet.devices import DASDDevice, FcoeDiskDevice, iScsiDiskDevice, MultipathDevice, ZFCPDiskDevice
from blivet.fcoe import has_fcoe
from pyanaconda.flags import flags
from pyanaconda.i18n import CN_, CP_
from pyanaconda.storage_utils import try_populate_devicetree, on_disk_storage
from pyanaconda.ui.lib.disks import getDisks
from pyanaconda.ui.gui.utils import timed_action
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.spokes.advstorage.fcoe import FCoEDialog
from pyanaconda.ui.gui.spokes.advstorage.iscsi import ISCSIDialog
from pyanaconda.ui.gui.spokes.advstorage.zfcp import ZFCPDialog
from pyanaconda.ui.gui.spokes.advstorage.dasd import DASDDialog
from pyanaconda.ui.gui.spokes.lib.cart import SelectedDisksDialog
from pyanaconda.ui.categories.system import SystemCategory
__all__ = ["FilterSpoke"]
DiskStoreRow = namedtuple("DiskStoreRow", ["visible", "selected", "mutable",
"name", "type", "model", "capacity",
"vendor", "interconnect", "serial",
"wwid", "paths", "port", "target",
"lun", "ccw", "wwpn"])
class FilterPage(object):
"""A FilterPage is the logic behind one of the notebook tabs on the filter
UI spoke. Each page has its own specific filtered model overlaid on top
of a common model that holds all non-advanced disks.
A Page is created once, when the filter spoke is initialized. It is
set up multiple times - each time the spoke is revisited. When the Page
is setup, it is given a complete view of all disks that belong on this
Page. This is because certain pages may require populating a combo with
all vendor names, or other similar tasks.
This class is just a base class. One subclass should be created for each
more specialized type of page. Only one instance of each subclass should
ever be created.
"""
def __init__(self, storage, builder):
"""Create a new FilterPage instance.
Instance attributes:
builder -- A reference to the Gtk.Builder instance containing
this page's UI elements.
filterActive -- Whether the user has chosen to filter results down
on this page. If set, visible_func should take the
filter UI elements into account.
storage -- An instance of a blivet object.
"""
self.builder = builder
self.storage = storage
self.model = None
self.filterActive = False
def ismember(self, device):
"""Does device belong on this page? This function should taken into
account what kind of thing device is. It should not be concerned
with any sort of filtering settings. It only determines whether
device belongs.
"""
return True
def setup(self, store, selectedNames, disks):
"""Do whatever setup of the UI is necessary before this page can be
displayed. This function is called every time the filter spoke
is revisited, and thus must first do any cleanup that is necessary.
The setup function is passed a reference to the master store, a list
of names of disks the user has selected (either from a previous visit
or via kickstart), and a list of all disk objects that belong on this
page as determined from the ismember method.
At the least, this method should add all the disks to the store. It
may also need to populate combos and other lists as appropriate.
"""
pass
def clear(self):
"""Blank out any filtering-related fields on this page and return them
to their defaults. This is called when the Clear button is clicked.
"""
pass
def visible_func(self, model, itr, *args):
"""This method is called for every row (disk) in the store, in order to
determine if it should be displayed on this page or not. This method
should take into account whether filterActive is set, perhaps whether
something in pyanaconda.flags is setup, and other settings to make
a final decision. Because filtering can be complicated, many pages
will want to farm this decision out to another method.
The return value is a boolean indicating whether the row is visible
or not.
"""
return True
def setupCombo(self, combo, items):
"""Populate a given GtkComboBoxText instance with a list of items. The
combo will first be cleared, so this method is suitable for calling
repeatedly. The first item in the list will be empty to allow the
combo box criterion to be cleared. The first non-empty item in the
list will be selected by default.
"""
combo.remove_all()
combo.append_text('')
for i in sorted(set(items)):
combo.append_text(i)
if items:
combo.set_active(1)
def _long_identifier(self, disk):
# For iSCSI devices, we want the long ip-address:port-iscsi-tgtname-lun-XX
# identifier, but blivet doesn't expose that in any useful way and I don't
# want to go asking udev. Instead, we dig around in the deviceLinks and
# default to the name if we can't figure anything else out.
for link in disk.deviceLinks:
if "by-path" in link:
lastSlash = link.rindex("/")+1
return link[lastSlash:]
return disk.name
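# Hedged sketch (illustration only, not part of anaconda): a minimal
# FilterPage subclass wiring up the lifecycle described in the base class
# docstring. "ExampleVendor" is a hypothetical vendor string.
class _ExampleVendorPage(FilterPage):
    def ismember(self, device):
        # only disks from the hypothetical vendor belong on this page
        return getattr(device, "vendor", "") == "ExampleVendor"
    def visible_func(self, model, itr, *args):
        row = DiskStoreRow(*model[itr])
        device = self.storage.devicetree.get_device_by_name(row.name,
                                                            hidden=True)
        return self.ismember(device)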
class SearchPage(FilterPage):
# Match these to searchTypeCombo ids in glade
SEARCH_TYPE_NONE = 'None'
SEARCH_TYPE_PORT_TARGET_LUN = 'PTL'
SEARCH_TYPE_WWID = 'WWID'
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("searchModel")
self.model.set_visible_func(self.visible_func)
self._lunEntry = self.builder.get_object("searchLUNEntry")
self._wwidEntry = self.builder.get_object("searchWWIDEntry")
self._combo = self.builder.get_object("searchTypeCombo")
self._portCombo = self.builder.get_object("searchPortCombo")
self._targetEntry = self.builder.get_object("searchTargetEntry")
def setup(self, store, selectedNames, disks):
self._combo.set_active_id(self.SEARCH_TYPE_NONE)
self._combo.emit("changed")
ports = []
for disk in disks:
if hasattr(disk, "node") and disk.node is not None:
ports.append(str(disk.node.port))
self.setupCombo(self._portCombo, ports)
def clear(self):
self._lunEntry.set_text("")
self._portCombo.set_active(0)
self._targetEntry.set_text("")
self._wwidEntry.set_text("")
def _port_equal(self, device):
active = self._portCombo.get_active_text()
if active:
if hasattr(device, "node"):
return device.node.port == int(active)
else:
return False
else:
return True
def _target_equal(self, device):
active = self._targetEntry.get_text().strip()
if active:
return active in getattr(device, "initiator", "")
else:
return True
def _lun_equal(self, device):
active = self._lunEntry.get_text().strip()
if active:
if hasattr(device, "node"):
try:
return int(active) == device.node.tpgt
except ValueError:
return False
elif hasattr(device, "fcp_lun"):
return active in device.fcp_lun
else:
return True
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active_id()
if filterBy == self.SEARCH_TYPE_NONE:
return True
elif filterBy == self.SEARCH_TYPE_PORT_TARGET_LUN:
return self._port_equal(device) and self._target_equal(device) and self._lun_equal(device)
elif filterBy == self.SEARCH_TYPE_WWID:
return self._wwidEntry.get_text() in getattr(device, "wwid", self._long_identifier(device))
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.get_device_by_name(obj.name, hidden=True)
return self._filter_func(device)
class MultipathPage(FilterPage):
# Match these to multipathTypeCombo ids in glade
SEARCH_TYPE_NONE = 'None'
SEARCH_TYPE_VENDOR = 'Vendor'
SEARCH_TYPE_INTERCONNECT = 'Interconnect'
SEARCH_TYPE_WWID = 'WWID'
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("multipathModel")
self.model.set_visible_func(self.visible_func)
self._combo = self.builder.get_object("multipathTypeCombo")
self._icCombo = self.builder.get_object("multipathInterconnectCombo")
self._vendorCombo = self.builder.get_object("multipathVendorCombo")
self._wwidEntry = self.builder.get_object("multipathWWIDEntry")
def ismember(self, device):
return isinstance(device, MultipathDevice)
def setup(self, store, selectedNames, disks):
vendors = []
interconnects = []
for disk in disks:
paths = [d.name for d in disk.parents]
selected = disk.name in selectedNames
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial,
disk.wwid, "\n".join(paths), "", "",
"", "", ""])
if not disk.vendor in vendors:
vendors.append(disk.vendor)
if not disk.bus in interconnects:
interconnects.append(disk.bus)
self._combo.set_active_id(self.SEARCH_TYPE_NONE)
self._combo.emit("changed")
self.setupCombo(self._vendorCombo, vendors)
self.setupCombo(self._icCombo, interconnects)
def clear(self):
self._icCombo.set_active(0)
self._vendorCombo.set_active(0)
self._wwidEntry.set_text("")
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active_id()
if filterBy == self.SEARCH_TYPE_NONE:
return True
elif filterBy == self.SEARCH_TYPE_VENDOR:
return device.vendor == self._vendorCombo.get_active_text()
elif filterBy == self.SEARCH_TYPE_INTERCONNECT:
return device.bus == self._icCombo.get_active_text()
elif filterBy == self.SEARCH_TYPE_WWID:
return self._wwidEntry.get_text() in device.wwid
def visible_func(self, model, itr, *args):
if not flags.mpath:
return False
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.get_device_by_name(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class OtherPage(FilterPage):
# Match these to otherTypeCombo ids in glade
SEARCH_TYPE_NONE = 'None'
SEARCH_TYPE_VENDOR = 'Vendor'
SEARCH_TYPE_INTERCONNECT = 'Interconnect'
SEARCH_TYPE_ID = 'ID'
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("otherModel")
self.model.set_visible_func(self.visible_func)
self._combo = self.builder.get_object("otherTypeCombo")
self._icCombo = self.builder.get_object("otherInterconnectCombo")
self._idEntry = self.builder.get_object("otherIDEntry")
self._vendorCombo = self.builder.get_object("otherVendorCombo")
def ismember(self, device):
return isinstance(device, iScsiDiskDevice) or isinstance(device, FcoeDiskDevice)
def setup(self, store, selectedNames, disks):
vendors = []
interconnects = []
for disk in disks:
paths = [d.name for d in disk.parents]
selected = disk.name in selectedNames
if hasattr(disk, "node") and disk.node is not None:
port = str(disk.node.port)
lun = str(disk.node.tpgt)
else:
port = ""
lun = ""
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial,
self._long_identifier(disk), "\n".join(paths), port, getattr(disk, "initiator", ""),
lun, "", ""])
if not disk.vendor in vendors:
vendors.append(disk.vendor)
if not disk.bus in interconnects:
interconnects.append(disk.bus)
self._combo.set_active_id(self.SEARCH_TYPE_NONE)
self._combo.emit("changed")
self.setupCombo(self._vendorCombo, vendors)
self.setupCombo(self._icCombo, interconnects)
def clear(self):
self._icCombo.set_active(0)
self._idEntry.set_text("")
self._vendorCombo.set_active(0)
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active_id()
if filterBy == self.SEARCH_TYPE_NONE:
return True
elif filterBy == self.SEARCH_TYPE_VENDOR:
return device.vendor == self._vendorCombo.get_active_text()
elif filterBy == self.SEARCH_TYPE_INTERCONNECT:
return device.bus == self._icCombo.get_active_text()
elif filterBy == self.SEARCH_TYPE_ID:
for link in device.deviceLinks:
if "by-path" in link:
return self._idEntry.get_text().strip() in link
return False
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.get_device_by_name(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class ZPage(FilterPage):
# Match these to zTypeCombo ids in glade
SEARCH_TYPE_NONE = 'None'
SEARCH_TYPE_CCW = 'CCW'
SEARCH_TYPE_WWPN = 'WWPN'
SEARCH_TYPE_LUN = 'LUN'
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("zModel")
self.model.set_visible_func(self.visible_func)
self._ccwEntry = self.builder.get_object("zCCWEntry")
self._wwpnEntry = self.builder.get_object("zWWPNEntry")
self._lunEntry = self.builder.get_object("zLUNEntry")
self._combo = self.builder.get_object("zTypeCombo")
self._isS390 = arch.is_s390()
def clear(self):
self._lunEntry.set_text("")
self._ccwEntry.set_text("")
self._wwpnEntry.set_text("")
def ismember(self, device):
return isinstance(device, ZFCPDiskDevice) or isinstance(device, DASDDevice)
def setup(self, store, selectedNames, disks):
""" Set up our Z-page, but only if we're running on s390x. """
        if not self._isS390:
            return
        ccws = []
        wwpns = []
        luns = []
        self._combo.set_active_id(self.SEARCH_TYPE_NONE)
        self._combo.emit("changed")
        for disk in disks:
            paths = [d.name for d in disk.parents]
            selected = disk.name in selectedNames
            if getattr(disk, "type") == "zfcp":
                # remember to store all of the zfcp-related junk so we can
                # see it in the UI
                if disk.fcp_lun not in luns:
                    luns.append(disk.fcp_lun)
                if disk.wwpn not in wwpns:
                    wwpns.append(disk.wwpn)
                if disk.hba_id not in ccws:
                    ccws.append(disk.hba_id)
            # now add it to our store
            store.append([True, selected, not disk.protected,
                          disk.name, "", disk.model, str(disk.size),
                          disk.vendor, disk.bus, disk.serial, "", "\n".join(paths),
                          "", "", disk.fcp_lun, disk.hba_id, disk.wwpn])
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active_id()
if filterBy == self.SEARCH_TYPE_NONE:
return True
elif filterBy == self.SEARCH_TYPE_CCW:
return self._ccwEntry.get_text() in device.hba_id
elif filterBy == self.SEARCH_TYPE_WWPN:
return self._wwpnEntry.get_text() in device.wwpn
elif filterBy == self.SEARCH_TYPE_LUN:
return self._lunEntry.get_text() in device.fcp_lun
return False
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.get_device_by_name(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class FilterSpoke(NormalSpoke):
"""
.. inheritance-diagram:: FilterSpoke
:parts: 3
"""
builderObjects = ["diskStore", "filterWindow",
"searchModel", "multipathModel", "otherModel", "zModel"]
mainWidgetName = "filterWindow"
uiFile = "spokes/filter.glade"
helpFile = "FilterSpoke.xml"
category = SystemCategory
title = CN_("GUI|Spoke", "_INSTALLATION DESTINATION")
def __init__(self, *args):
NormalSpoke.__init__(self, *args)
self.applyOnSkip = True
self.ancestors = []
self.disks = []
self.selected_disks = []
@property
def indirect(self):
return True
# This spoke has no status since it's not in a hub
@property
def status(self):
return None
def apply(self):
onlyuse = self.selected_disks[:]
for disk in [d for d in self.storage.disks if d.name in onlyuse]:
onlyuse.extend([d.name for d in disk.ancestors
if d.name not in onlyuse])
self.data.ignoredisk.onlyuse = onlyuse
self.data.clearpart.drives = self.selected_disks[:]
# some disks may have been added in this spoke, we need to recreate the
# snapshot of on-disk storage
if on_disk_storage.created:
on_disk_storage.dispose_snapshot()
on_disk_storage.create_snapshot(self.storage)
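    # Worked example (illustrative): if selected_disks == ["mpatha"] and that
    # multipath device is backed by sda and sdb, the loop above extends
    # onlyuse to ["mpatha", "sda", "sdb"], so ignoredisk.onlyuse also covers
    # the member paths of every selected disk.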
def initialize(self):
NormalSpoke.initialize(self)
self.pages = [SearchPage(self.storage, self.builder),
MultipathPage(self.storage, self.builder),
OtherPage(self.storage, self.builder),
ZPage(self.storage, self.builder)]
self._notebook = self.builder.get_object("advancedNotebook")
if not arch.is_s390():
self._notebook.remove_page(-1)
self.builder.get_object("addZFCPButton").destroy()
self.builder.get_object("addDASDButton").destroy()
if not has_fcoe():
self.builder.get_object("addFCOEButton").destroy()
self._store = self.builder.get_object("diskStore")
self._addDisksButton = self.builder.get_object("addDisksButton")
def _real_ancestors(self, disk):
# Return a list of all the ancestors of a disk, but remove the disk
# itself from this list.
return [d for d in disk.ancestors if d.name != disk.name]
def refresh(self):
NormalSpoke.refresh(self)
self.disks = getDisks(self.storage.devicetree)
self.selected_disks = self.data.ignoredisk.onlyuse[:]
self.ancestors = [d.name for disk in self.disks for d in self._real_ancestors(disk)]
self._store.clear()
allDisks = []
multipathDisks = []
otherDisks = []
zDisks = []
        # Now add all the non-local disks to the store. Everything has been set up
# ahead of time, so there's no need to configure anything. We first make
# these lists of disks, then call setup on each individual page. This is
# because there could be page-specific setup to do that requires a complete
# view of all the disks on that page.
for disk in self.disks:
if self.pages[1].ismember(disk):
multipathDisks.append(disk)
elif self.pages[2].ismember(disk):
otherDisks.append(disk)
elif self.pages[3].ismember(disk):
zDisks.append(disk)
allDisks.append(disk)
self.pages[0].setup(self._store, self.selected_disks, allDisks)
self.pages[1].setup(self._store, self.selected_disks, multipathDisks)
self.pages[2].setup(self._store, self.selected_disks, otherDisks)
self.pages[3].setup(self._store, self.selected_disks, zDisks)
self._update_summary()
def _update_summary(self):
summaryButton = self.builder.get_object("summary_button")
label = self.builder.get_object("summary_button_label")
# We need to remove ancestor devices from the count. Otherwise, we'll
# end up in a situation where selecting one multipath device could
# potentially show three devices selected (mpatha, sda, sdb for instance).
count = len([disk for disk in self.selected_disks if disk not in self.ancestors])
summary = CP_("GUI|Installation Destination|Filter",
"%d _storage device selected",
"%d _storage devices selected",
count) % count
label.set_text(summary)
label.set_use_underline(True)
summaryButton.set_visible(count > 0)
label.set_sensitive(count > 0)
def on_back_clicked(self, button):
self.skipTo = "StorageSpoke"
NormalSpoke.on_back_clicked(self, button)
def on_summary_clicked(self, button):
dialog = SelectedDisksDialog(self.data)
# Include any disks selected in the initial storage spoke, plus any
# selected in this filter UI.
disks = [disk for disk in self.disks if disk.name in self.selected_disks]
free_space = self.storage.get_free_space(disks=disks)
with self.main_window.enlightbox(dialog.window):
dialog.refresh(disks, free_space, showRemove=False, setBoot=False)
dialog.run()
@timed_action(delay=1200, busy_cursor=False)
def on_filter_changed(self, *args):
n = self._notebook.get_current_page()
self.pages[n].filterActive = True
self.pages[n].model.refilter()
def on_clear_icon_clicked(self, entry, icon_pos, event):
if icon_pos == Gtk.EntryIconPosition.SECONDARY:
entry.set_text("")
def on_page_switched(self, notebook, newPage, newPageNum, *args):
self.pages[newPageNum].model.refilter()
notebook.get_nth_page(newPageNum).show_all()
def on_row_toggled(self, button, path):
if not path:
return
page_index = self._notebook.get_current_page()
filter_model = self.pages[page_index].model
model_itr = filter_model.get_iter(path)
itr = filter_model.convert_iter_to_child_iter(model_itr)
self._store[itr][1] = not self._store[itr][1]
if self._store[itr][1] and self._store[itr][3] not in self.selected_disks:
self.selected_disks.append(self._store[itr][3])
elif not self._store[itr][1] and self._store[itr][3] in self.selected_disks:
self.selected_disks.remove(self._store[itr][3])
self._update_summary()
@timed_action(delay=50, threshold=100)
def on_refresh_clicked(self, widget, *args):
try_populate_devicetree(self.storage.devicetree)
self.refresh()
def on_add_iscsi_clicked(self, widget, *args):
dialog = ISCSIDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_fcoe_clicked(self, widget, *args):
dialog = FCoEDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_zfcp_clicked(self, widget, *args):
dialog = ZFCPDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
rc = dialog.run()
if rc == 1:
self.skipTo = "StorageSpoke"
self.on_back_clicked(rc)
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_dasd_clicked(self, widget, *args):
dialog = DASDDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
##
## SEARCH TAB SIGNAL HANDLERS
##
def on_search_type_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("searchTypeNotebook")
notebook.set_current_page(ndx)
self.on_filter_changed()
##
## MULTIPATH TAB SIGNAL HANDLERS
##
def on_multipath_type_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("multipathTypeNotebook")
notebook.set_current_page(ndx)
self.on_filter_changed()
##
## OTHER TAB SIGNAL HANDLERS
##
def on_other_type_combo_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("otherTypeNotebook")
notebook.set_current_page(ndx)
self.on_filter_changed()
##
## Z TAB SIGNAL HANDLERS
##
def on_z_type_combo_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("zTypeNotebook")
notebook.set_current_page(ndx)
self.on_filter_changed()
|
sujithshankar/anaconda
|
pyanaconda/ui/gui/spokes/filter.py
|
Python
|
gpl-2.0
| 27,986
|
[
"VisIt"
] |
f2ce42cb0078e8eb5a1cf68b8a82674d94a3a70fba8a610402517ced39fd5316
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
import MDAnalysis as mda
import pytest
from MDAnalysis.coordinates.memory import MemoryReader
from MDAnalysisTests.datafiles import DCD, PSF
from MDAnalysisTests.coordinates.base import (BaseReference,
MultiframeReaderTest)
from MDAnalysis.coordinates.memory import Timestep
from numpy.testing import assert_equal, assert_almost_equal
class MemoryReference(BaseReference):
def __init__(self):
super(MemoryReference, self).__init__()
self.topology = PSF
self.trajectory = DCD
self.universe = mda.Universe(PSF, DCD)
self.n_atoms = self.universe.trajectory.n_atoms
self.n_frames = self.universe.trajectory.n_frames
self.dt = self.universe.trajectory.ts.dt
self.dimensions = self.universe.trajectory.ts.dimensions
self.totaltime = self.universe.trajectory.totaltime
self.volume = self.universe.trajectory.ts.volume
self.first_frame = Timestep(self.n_atoms)
self.first_frame.positions = np.array(self.universe.trajectory[0])
self.first_frame.frame = 0
self.first_frame.time = self.first_frame.frame*self.dt
self.second_frame = Timestep(self.n_atoms)
self.second_frame.positions = np.array(self.universe.trajectory[1])
self.second_frame.frame = 1
self.second_frame.time = self.second_frame.frame*self.dt
self.last_frame = Timestep(self.n_atoms)
self.last_frame.positions = \
np.array(self.universe.trajectory[self.n_frames - 1])
self.last_frame.frame = self.n_frames - 1
self.last_frame.time = self.last_frame.frame*self.dt
self.jump_to_frame = self.first_frame.copy()
self.jump_to_frame.positions = np.array(self.universe.trajectory[3])
self.jump_to_frame.frame = 3
self.jump_to_frame.time = self.jump_to_frame.frame*self.dt
def reader(self, trajectory):
return mda.Universe(self.topology,
trajectory, in_memory=True).trajectory
def iter_ts(self, i):
ts = self.universe.trajectory[i]
# correct time because memory reader doesn't read the correct time
ts.time = ts.frame * self.dt
return ts
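# Illustrative usage sketch (not part of the original tests): the MemoryReader
# can be reached either by loading a trajectory into memory or by wrapping a
# plain coordinate array, which is what the tests below exercise:
#
#   u = mda.Universe(PSF, DCD, in_memory=True)      # DCD read into memory
#   coords = u.trajectory.timeseries(order='fac')   # (n_frames, n_atoms, 3)
#   u2 = mda.Universe(PSF, coords, format=MemoryReader, order='fac')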
class TestMemoryReader(MultiframeReaderTest):
@staticmethod
@pytest.fixture(scope='class')
def ref():
return MemoryReference()
    def test_filename_transfer_to_memory(self):
        # MemoryReader should have a filename attribute set to the trajectory filename
universe = mda.Universe(PSF, DCD)
universe.transfer_to_memory()
assert_equal(universe.trajectory.filename, DCD)
def test_filename_array(self):
# filename attribute of MemoryReader should be None when generated from an array
universe = mda.Universe(PSF, DCD)
coordinates = universe.trajectory.timeseries(universe.atoms)
universe2 = mda.Universe(PSF, coordinates, format=MemoryReader, order='afc')
assert universe2.trajectory.filename is None
def test_default_memory_layout(self):
universe1 = mda.Universe(PSF, DCD, in_memory=True)
universe2 = mda.Universe(PSF, DCD, in_memory=True, order='fac')
assert_equal(universe1.trajectory.get_array().shape,
universe2.trajectory.get_array().shape)
def test_iteration(self, ref, reader):
frames = 0
        for _ in reader:
frames += 1
assert frames == ref.n_frames
def test_extract_array_afc(self, reader):
assert_equal(reader.timeseries(order='afc').shape, (3341, 98, 3))
def test_extract_array_fac(self, reader):
assert_equal(reader.timeseries(order='fac').shape, (98, 3341, 3))
def test_extract_array_cfa(self, reader):
assert_equal(reader.timeseries(order='cfa').shape, (3, 98, 3341))
def test_extract_array_acf(self, reader):
assert_equal(reader.timeseries(order='acf').shape, (3341, 3, 98))
def test_extract_array_fca(self, reader):
assert_equal(reader.timeseries(order='fca').shape, (98, 3, 3341))
def test_extract_array_caf(self, reader):
assert_equal(reader.timeseries(order='caf').shape, (3, 3341, 98))
def test_timeseries_skip1(self, ref, reader):
assert_equal(reader.timeseries(ref.universe.atoms).shape,
(3341, 98, 3))
def test_timeseries_skip10(self, reader):
# Check that timeseries skip works similar to numpy slicing
array1 = reader.timeseries(step=10)
        array2 = reader.timeseries()[:, ::10, :]
assert_equal(array1, array2)
def test_timeseries_view(self, reader):
# timeseries() is expected to provide a view of the underlying array
assert reader.timeseries().base is reader.get_array()
def test_timeseries_subarray_view(self, reader):
# timeseries() is expected to provide a view of the underlying array
# also in the case where we slice the array using the start, stop and
# step options.
        assert reader.timeseries(start=5, stop=15, step=2, order='fac').base \
            is reader.get_array()
def test_timeseries_view_from_universe_atoms(self, ref, reader):
# timeseries() is expected to provide a view of the underlying array
# also in the special case when asel=universe.atoms.
selection = ref.universe.atoms
assert reader.timeseries(asel=selection).base is reader.get_array()
def test_timeseries_view_from_select_all(self, ref, reader):
# timeseries() is expected to provide a view of the underlying array
# also in the special case when using "all" in selections.
selection = ref.universe.select_atoms("all")
        assert reader.timeseries(asel=selection).base is reader.get_array()
def test_timeseries_noview(self, ref, reader):
# timeseries() is expected NOT to provide a view of the underlying array
# for any other selection than "all".
selection = ref.universe.select_atoms("name CA")
assert reader.timeseries(asel=selection).base is not reader.get_array()
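    # Note (illustrative): because the "all atoms" paths above return views,
    # in-place edits write through to the reader's underlying array:
    #
    #   arr = reader.timeseries()   # arr.base is reader.get_array()
    #   arr[0, 0, 0] = 1.0          # also mutates the stored trajectory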
def test_repr(self, reader):
str_rep = str(reader)
expected = "<MemoryReader with 98 frames of 3341 atoms>"
assert_equal(str_rep, expected)
def test_get_writer_1(self):
pass
def test_get_writer_2(self):
pass
def test_float32(self, ref):
# Check that we get float32 positions even when initializing with float64
coordinates = np.random.uniform(size=(100, ref.universe.atoms.n_atoms, 3)).cumsum(0)
universe = mda.Universe(ref.universe.filename, coordinates, format=MemoryReader)
assert_equal(universe.trajectory.get_array().dtype, np.dtype('float32'))
def test_position_assignation(self, reader):
# When coordinates are assigned to a timestep, is the change persistent?
new_positions = np.ones_like(reader.ts.positions, dtype=np.float32)
reader.ts.positions = new_positions
reader[0]
assert_almost_equal(reader.ts.positions, new_positions)
class TestMemoryReaderVelsForces(object):
@staticmethod
@pytest.fixture(params=['2d', '3d'])
def ref_pos(request):
if request.param == '2d':
return np.arange(30).reshape(10, 3)
elif request.param == '3d':
return np.arange(30).reshape(1, 10, 3)
@staticmethod
@pytest.fixture(params=['2d', '3d'])
def ref_vels(request):
if request.param == '2d':
return np.arange(30).reshape(10, 3) + 100
elif request.param == '3d':
return np.arange(30).reshape(1, 10, 3) + 100
@staticmethod
@pytest.fixture(params=['2d', '3d'])
def ref_forces(request):
if request.param == '2d':
return np.arange(30).reshape(10, 3) + 1000
elif request.param == '3d':
return np.arange(30).reshape(1, 10, 3) + 1000
@staticmethod
def assert_equal_dims(arr1, arr2):
if arr2.ndim == 3:
assert_equal(arr1, arr2[0])
elif arr2.ndim == 2:
assert_equal(arr1, arr2)
def test_velocities(self, ref_pos, ref_vels):
mr = MemoryReader(ref_pos,
velocities=ref_vels)
assert mr.ts.has_velocities
self.assert_equal_dims(mr.ts.velocities, ref_vels)
assert not mr.ts.has_forces
def test_forces(self, ref_pos, ref_forces):
mr = MemoryReader(ref_pos,
forces=ref_forces)
assert not mr.ts.has_velocities
assert mr.ts.has_forces
self.assert_equal_dims(mr.ts.forces, ref_forces)
def test_both(self, ref_pos, ref_vels, ref_forces):
mr = MemoryReader(ref_pos,
velocities=ref_vels,
forces=ref_forces)
assert mr.ts.has_velocities
self.assert_equal_dims(mr.ts.velocities, ref_vels)
assert mr.ts.has_forces
self.assert_equal_dims(mr.ts.forces, ref_forces)
@pytest.mark.parametrize('param', ['velocities', 'forces'])
def test_wrongshape(self, ref_pos, param):
with pytest.raises(ValueError):
mr = MemoryReader(ref_pos, **{param: np.zeros((3, 2, 1))})
class TestDimensions(object):
@staticmethod
@pytest.fixture
def ref_pos():
return np.arange(270).reshape(3, 30, 3)
@staticmethod
@pytest.fixture
def ref_box():
return np.arange(18).reshape(3, 6)
def test_single_box(self, ref_pos):
box = np.array([3, 4, 5, 90, 90, 90])
mr = MemoryReader(ref_pos, dimensions=box)
for ts in mr:
assert_equal(ts.dimensions, box)
def test_varying_box(self, ref_pos, ref_box):
mr = MemoryReader(ref_pos, dimensions=ref_box)
for i, ts in enumerate(mr):
assert_equal(ts.dimensions, ref_box[i])
def test_wrong_length(self, ref_pos):
bad_box = np.arange(12).reshape(2, 6)
with pytest.raises(ValueError):
mr = MemoryReader(ref_pos, dimensions=bad_box)
def test_wrong_shape(self, ref_pos):
bad_box = np.arange(15).reshape(3, 5)
with pytest.raises(ValueError):
mr = MemoryReader(ref_pos, dimensions=bad_box)
class TestMemoryReaderModifications(object):
# check that modifying MR things behaves as expected
# in general, modifying the Timestep should be *permanent*
# this is unlike other Readers!
n_atoms = 10
n_frames = 4
@pytest.fixture()
def mr_reader(self):
pos = np.arange(self.n_frames * self.n_atoms * 3).reshape(
self.n_frames, self.n_atoms, 3)
vel = np.arange(self.n_frames * self.n_atoms * 3).reshape(
self.n_frames, self.n_atoms, 3) + 200
frc = np.arange(self.n_frames * self.n_atoms * 3).reshape(
self.n_frames, self.n_atoms, 3) + 400
box = np.arange(self.n_frames * 6).reshape(self.n_frames, 6) + 600
return MemoryReader(pos,
velocities=vel,
forces=frc,
dimensions=box)
@pytest.fixture()
def mr_universe(self, mr_reader):
u = mda.Universe.empty(self.n_atoms)
u.trajectory = mr_reader
return u
@pytest.mark.parametrize('attr', ['positions', 'velocities', 'forces', 'dimensions'])
def test_copying(self, mr_reader, attr):
mr2 = mr_reader.copy()
# update the attribute
ts = mr2.ts
setattr(ts, attr, 7)
# check the change worked
assert_almost_equal(getattr(ts, attr), 7)
assert ts.positions.shape == (self.n_atoms, 3)
assert ts.velocities.shape == (self.n_atoms, 3)
assert ts.forces.shape == (self.n_atoms, 3)
assert ts.dimensions.shape == (6,)
# move the Reader around, forcing updates of ts
ts = mr2[2]
ts = mr2[0]
# check our old change is still there
assert_almost_equal(getattr(ts, attr), 7)
@pytest.mark.parametrize('attr', ['positions', 'velocities', 'forces', 'dimensions'])
def test_attr_set(self, mr_universe, attr):
# same as above, but via a Universe/AtomGroup
u = mr_universe
ts = u.trajectory[0]
setattr(ts, attr, 7)
assert_almost_equal(getattr(ts, attr), 7)
ts = u.trajectory[2]
ts = u.trajectory[0]
assert_almost_equal(getattr(ts, attr), 7)
assert u.atoms.positions.shape == (self.n_atoms, 3)
assert u.atoms.velocities.shape == (self.n_atoms, 3)
assert u.atoms.forces.shape == (self.n_atoms, 3)
assert u.atoms.dimensions.shape == (6,)
@pytest.mark.parametrize('attr', ['velocities', 'forces', 'dimensions'])
def test_non_numpy_arr(self, attr):
with pytest.raises(TypeError):
mr = MemoryReader(np.zeros((10, 30, 3)),
**{attr: 'not an array'})
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/test_memory.py
|
Python
|
gpl-2.0
| 14,202
|
[
"MDAnalysis"
] |
aeb0649b4ebb060035ce4befdac7681ed46d5f92420e5a70c3a1b47b7979a7ae
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
from six.moves import xrange, zip # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function as framework_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework.func_graph import FuncGraph
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_state
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import object_identity
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
def _MarkReachedOps(from_ops, reached_ops, func_graphs):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: set of Operations.
func_graphs: list of FuncGraphs. This method will traverse through
these functions if they capture from_ops or any reachable ops.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if op not in reached_ops:
reached_ops.add(op)
for output in op.outputs:
if _IsBackpropagatable(output):
queue.extend(_Consumers(output, func_graphs))
def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,
xs_set):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op]' indicates the number of backprop inputs
to this operation.
Args:
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
func_graphs: list of FuncGraphs. This method will traverse through
these functions if they capture from_ops or any reachable ops. This is
useful if to_ops occur in a function and from_ops are in an outer function
or graph.
xs_set: ObjectIdentitySet of Tensors.
Returns:
A tuple containing: (1) the subset of to_ops reachable from from_ops by a
path of zero or more backpropagatable tensors, (2) a mapping from operation
to the number of backprop inputs to that op, and (3) a ControlFlowState
object which is not None if the ops between from_ops and to_ops contain
control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = set()
_MarkReachedOps(from_ops, reached_ops, func_graphs)
# X in reached_ops iff X is reachable from from_ops by a path of zero or more
# backpropagatable tensors.
reachable_to_ops = set(op for op in to_ops if op in reached_ops)
# Mark between ops.
between_ops = set()
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if op in reached_ops:
between_ops.add(op)
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops.remove(op)
for inp in _NonEagerInputs(op, xs_set):
queue.append(inp.op)
# X in between_ops iff X is on a path of zero or more backpropagatable tensors
# between from_ops and to_ops
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_state.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = collections.defaultdict(int)
for op in between_op_list:
for x in _NonEagerInputs(op, xs_set):
if x.op in between_ops:
pending_count[x.op] += 1
return reachable_to_ops, pending_count, loop_state
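# Toy example (illustrative): for a graph with edges x -> a, a -> y and
# x -> y, pending_count[x.op] == 2 because gradient contributions reach x
# from both a and y; backprop treats x as ready only after both arrive.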
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys,
ys,
colocate_gradients_with_ops,
gradient_uid="__unsupported__"):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gradient_uid: A unique identifier within the graph indicating
which invocation of gradients is being executed. Used to cluster
ops for compilation.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If sizes of gradients and inputs don't match
TypeError: If type of any gradient is not valid for its input.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
new_grad_ys = []
for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):
with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
if grad_y is None:
if y.dtype.is_complex:
raise TypeError(
"Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
y.dtype)
new_grad_ys.append(
array_ops.ones(
array_ops.shape(y), dtype=y.dtype, name="grad_ys_%d" % i))
continue
if y.dtype.is_floating or y.dtype.is_integer:
if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
raise TypeError(
"Gradient type %s generated for real or "
"integer-valued tensor %s with type %s must be "
"real or integer" % (dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
elif y.dtype.is_complex:
if not grad_y.dtype.is_complex:
raise TypeError(
"Gradient type %s generated for complex-valued "
"tensor %s with type %s must be real" % (dtypes.as_dtype(
grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))
elif y.dtype == dtypes.variant:
if grad_y.dtype != dtypes.variant:
raise TypeError(
"Gradient type %s generated for variant "
"tensor %s with type %s must be variant" % (dtypes.as_dtype(
grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))
elif y.dtype == dtypes.resource:
# We assume y is the handle of a ResourceVariable. The gradient of a
# ResourceVariable should be a numeric value, not another resource.
if grad_y.dtype == dtypes.resource:
raise TypeError("Input gradient %s for resource tensor %s should not "
"be a resource" % (grad_y, y))
else:
raise TypeError(
"Tensor %s with type %s must be numeric "
"to obtain a default gradient" % (y, dtypes.as_dtype(y.dtype).name))
# Create a grad_y tensor in the name scope of the gradient.
# Required for TensorArrays to identify which gradient call a
# grad_y value is coming from.
if isinstance(grad_y, ops.IndexedSlices):
new_grad_ys.append(
ops.IndexedSlices(
indices=(array_ops.identity(
grad_y.indices, name="grad_ys_%d_indices" % i)
if isinstance(grad_y.indices, ops.Tensor) else
grad_y.indices),
values=(array_ops.identity(
grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
grad_y.values, ops.Tensor) else grad_y.values),
dense_shape=(array_ops.identity(
grad_y.dense_shape, name="grad_ys_%d_shape" % i)
if isinstance(grad_y.dense_shape, ops.Tensor) else
grad_y.dense_shape)))
else:
new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i))
return new_grad_ys
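# Illustrative example (phrased in terms of the public API, not this file):
# leaving grad_ys unset seeds backprop with ones of y's shape and dtype, so
# for real-valued y the following build equivalent gradient subgraphs:
#
#   g1 = tf.gradients(y, x)
#   g2 = tf.gradients(y, x, grad_ys=tf.ones_like(y))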
def _IsBackpropagatable(tensor):
if backprop_util.IsTrainable(tensor):
return True
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype == dtypes.bfloat16
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
# While ops have inputs added to them during the gradient computation, so we
# skip the below check. See while_v2 for details.
if op.type == "While" or op.type == "StatelessWhile":
return
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: mapping from operation to number of backprop inputs.
xs_set: ObjectIdentitySet of Tensors.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in _NonEagerInputs(op, xs_set):
if pending_count[inp.op] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op)
  stop_ops.update(stop_gradient_ops)
return stop_ops
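# Illustrative example (in the spirit of the public tf.gradients docs): ops
# listed in stop_gradients are treated as constants during backprop:
#
#   a = tf.constant(0.)
#   b = 2 * a
#   g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
#   # g evaluates to [1., 1.] instead of the unconstrained [3., 1.]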
@contextlib.contextmanager
def _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): # pylint: disable=invalid-name
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops._colocate_with_for_gradient(op, gradient_uid): # pylint: disable=protected-access
yield
else:
yield
def _IsPartitionedCall(op):
return op.type == "PartitionedCall" or op.type == "StatefulPartitionedCall"
def _SymGrad(op, out_grads):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in op.inputs] + out_grads
f_types = [default_gradient.get_zeros_dtype(x) for x in op.inputs]
f = attr_value_pb2.NameAttrList()
if _IsPartitionedCall(op):
f.name = op.get_attr("f").name
else:
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)
return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
"""Compile the calculation in grad_fn if op was marked as compiled."""
scope = scope.rstrip("/").replace("/", "_")
if func is not None:
xla_compile = func.definition.attr["_XlaCompile"].b
xla_separate_compiled_gradients = func.definition.attr[
"_XlaSeparateCompiledGradients"].b
xla_scope = func.definition.attr["_XlaScope"].s.decode()
else:
try:
xla_compile = op.get_attr("_XlaCompile")
xla_separate_compiled_gradients = op.get_attr(
"_XlaSeparateCompiledGradients")
xla_scope = op.get_attr("_XlaScope").decode()
except ValueError:
xla_compile = False
if not xla_compile:
return grad_fn() # Exit early
# If the gradients are supposed to be compiled separately, we give them a
# _XlaScope name that is based on the name_scope of the gradients. Otherwise
# they just inherit the existing _XlaScope name, which lets them be merged
# together with the non-gradient computation.
if xla_separate_compiled_gradients:
xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
else:
xla_grad_scope = xla_scope
attrs = {
"_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
"_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
}
with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access
return grad_fn()
def _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set):
"""Raises an error if we backprop through a loop var."""
# Find the nearest 'to_op' reachable from 'op' to provide a more helpful error
# message.
target_op = None
queue = collections.deque([op])
visited = set()
while queue:
curr_op = queue.popleft()
if curr_op in visited: continue
visited.add(curr_op)
if curr_op in from_ops:
target_op = curr_op
break
queue.extend(t.op for t in _NonEagerInputs(curr_op, xs_set))
assert target_op
raise ValueError(
"Cannot compute gradient inside while loop with respect to op '%s'. "
"We do not support taking the gradient wrt or through the initial value "
"of a loop variable. Gradients can be computed through loop invariants "
"or wrt the input parameters to the loop body."
% target_op.name)
def _IsFunction(graph):
return (isinstance(graph, FuncGraph) or
isinstance(graph, framework_function._FuncGraph)) # pylint: disable=protected-access
def _Captures(func_graph):
if isinstance(func_graph, FuncGraph):
return func_graph.captures
else:
assert isinstance(func_graph, framework_function._FuncGraph) # pylint: disable=protected-access
return func_graph.captures
def _MaybeCaptured(t):
"""If t is a captured value placeholder, returns the original captured value.
Args:
t: Tensor
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
# pylint: disable=protected-access
if (not isinstance(t, ops.EagerTensor) and
_IsFunction(t.op.graph) and t.op.type == "Placeholder"):
for input_t, placeholder_t in _Captures(t.op.graph):
if t is placeholder_t:
return _MaybeCaptured(input_t)
# pylint: enable=protected-access
return t
def _NonEagerInputs(op, xs_set):
"""Returns the inputs of op, crossing closure boundaries where necessary.
Does not return any captured EagerTensors, i.e., the number of tensors
returned may be less than the actual number of inputs.
Args:
op: Operation
xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.
Returns:
A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
is in a FuncGraph and has captured inputs.
"""
return [t for t in _Inputs(op, xs_set) if not isinstance(t, ops.EagerTensor)]
# TODO(skyewm): plumbing xs through everywhere is ugly, consider making
# _GradientsHelper a class with xs as a member variable.
def _Inputs(op, xs_set):
"""Returns the inputs of op, crossing closure boundaries where necessary.
Args:
op: Operation
xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.
Returns:
A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
is in a FuncGraph and has captured inputs.
"""
if _IsFunction(op.graph): # pylint: disable=protected-access
inputs = []
for t in op.inputs:
# If we're differentiating w.r.t. `t`, do not attempt to traverse through
# it to a captured value. The algorithm needs to "see" `t` in this case,
# even if it's a function input for a captured value, whereas usually we'd
# like to traverse through these closures as if the captured value was the
# direct input to op.
if t not in xs_set:
t = _MaybeCaptured(t)
inputs.append(t)
return inputs
else:
return op.inputs
def _Consumers(t, func_graphs):
"""Returns the consumers of t, crossing closure boundaries where necessary.
Args:
t: Tensor
func_graphs: a list of FuncGraphs that may have captured t.
Returns:
A list of tensors. The tensors will be from the current graph and/or
func_graphs.
"""
consumers = t.consumers()
for func in func_graphs:
for input_t, placeholder in _Captures(func):
if input_t is t:
consumers.extend(_Consumers(placeholder, func_graphs))
return consumers
def _GradientsHelper(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None,
stop_gradients=None,
unconnected_gradients=UnconnectedGradients.NONE,
src_graph=None):
"""Implementation of gradients()."""
if context.executing_eagerly():
raise RuntimeError("tf.gradients is not supported when eager execution "
"is enabled. Use tf.GradientTape instead.")
if src_graph is None:
src_graph = ops.get_default_graph()
try:
unconnected_gradients = UnconnectedGradients(unconnected_gradients)
except ValueError:
raise ValueError(
"Unknown value for unconnected_gradients: %r" % unconnected_gradients)
# If src_graph is a _FuncGraph (i.e. a function body), gather it and all
# ancestor graphs. This is necessary for correctly handling captured values.
func_graphs = []
curr_graph = src_graph
while _IsFunction(curr_graph):
func_graphs.append(curr_graph)
if isinstance(curr_graph, FuncGraph):
curr_graph = curr_graph.outer_graph
else:
assert isinstance(curr_graph, framework_function._FuncGraph) # pylint: disable=protected-access
curr_graph = curr_graph._outer_graph # pylint: disable=protected-access
ys = _AsList(ys)
xs = _AsList(xs)
stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(
name, "gradients",
list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
# Get a uid for this call to gradients that can be used to help
# cluster ops for compilation.
gradient_uid = ops.get_default_graph().unique_name("uid")
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = [
x.handle if resource_variable_ops.is_resource_variable(x) else x
for x in xs
]
xs = ops.internal_convert_n_to_tensor_or_indexed_slices(
xs, name="x", as_ref=True)
xs_set = object_identity.ObjectIdentitySet(xs)
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops,
gradient_uid)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
stop_gradient_ops = [t.op for t in stop_gradients]
reachable_to_ops, pending_count, loop_state = _PendingCount(
to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs_set)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
ready = (pending_count[op] == 0)
if ready and op not in to_ops_set and op in reachable_to_ops:
to_ops_set.add(op)
queue.append(op)
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if backprop_util.IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,
aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
func_call = None
is_partitioned_call = _IsPartitionedCall(op)
# pylint: disable=protected-access
is_func_call = (
src_graph._is_function(op.type) or is_partitioned_call)
# pylint: enable=protected-access
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op not in stop_ops):
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
if is_func_call:
if is_partitioned_call:
func_name = compat.as_bytes(op.get_attr("f").name)
func_call = src_graph._get_function( # pylint: disable=protected-access
func_name)
# When a graph is imported, the FunctionDefs are not copied over
# to each sub-graph so we recursively search the outer graphs
# for the FunctionDef.
if not func_call and hasattr(src_graph, "outer_graph"):
graph = src_graph.outer_graph
while graph is not None:
func_call = graph._get_function(func_name) # pylint: disable=protected-access
if func_call is not None:
break
if hasattr(graph, "outer_graph"):
graph = graph.outer_graph
else:
break
else:
func_call = src_graph._get_function(op.type) # pylint: disable=protected-access
# Note that __defun is not set if the graph is
# imported. If it's set, we prefer to access the original
# defun.
func_call = getattr(op, "__defun", func_call)
grad_fn = func_call.python_grad_func
else:
raise LookupError(
"No gradient defined for operation" +
"'%s' (op type: %s). " %(op.name, op.type) +
"In general every operation must have an associated " +
"`@tf.RegisterGradient` for correct autodiff, which this " +
"op is lacking. If you want to pretend this " +
"operation is a constant in your program, you may insert " +
"`tf.stop_gradient`. This can be useful to silence the " +
"error in cases where you know gradients are not needed, " +
"e.g. the forward pass of tf.custom_gradient. " +
"Please see more details in " +
"https://www.tensorflow.org/api_docs/python/tf/custom_gradient.") # pylint: disable=line-too-long
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
# NOTE(skyewm): We don't support computing gradients wrt a loop variable
# unless it's within the context of a single iteration (i.e. the
# gradient is wrt to the loop parameter in the body function, not wrt or
# through the initial value). This means if we're in a while loop
# context, we should never see a switch node from this context.
# pylint: disable=protected-access
if (control_flow_util.IsSwitch(op) and
op._control_flow_context is not None and
op._control_flow_context.IsWhileContext() and
op._control_flow_context ==
ops.get_default_graph()._get_control_flow_context()):
_RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set)
# pylint: enable=protected-access
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and not out_grad) and (
(not grad_fn and is_func_call)
or backprop_util.IsTrainable(op.outputs[i])):
# Only trainable outputs or outputs for a function call that
# will use SymbolicGradient get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
# TODO(apassos) gradients of resource handles might be an
# issue here because of zeros.
if loop_state:
out_grads[i] = loop_state.ZerosLikeV1WhileLoop(op, i)
elif default_gradient.supports_default_grad(op.outputs[i]):
# TODO(b/143286622): The supports_default_grad check is needed
# because While op emits non-differentiable resource tensors
# as outputs. Remove this check when that is not the case.
out_grads[i] = control_flow_state.ZerosLike(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with src_graph._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = _MaybeCompile(grad_scope, op, func_call,
lambda: grad_fn(op, *out_grads))
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _MaybeCompile(grad_scope, op, func_call,
lambda: _SymGrad(op, out_grads))
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len([x for x in in_grads
if x is not None]) > 1:
with ops.device(None):
with ops._colocate_with_for_gradient( # pylint: disable=protected-access
None,
gradient_uid,
ignore_existing=True):
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(_Inputs(op, xs_set))
# Note: we don't filter out eager inputs here because the inputs need to
# line up with in_grads.
for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs_set), in_grads)):
if in_grad is not None:
if (isinstance(in_grad, ops.Tensor) and
t_in.dtype != dtypes.resource):
try:
in_grad.set_shape(t_in.get_shape())
except ValueError:
raise ValueError(
"Incompatible shapes between op input and calculated "
"input gradient. Forward operation: %s. Input index: %d. "
"Original input shape: %s. "
"Calculated input gradient shape: %s" %
(op.name, i, t_in.shape, in_grad.shape))
if not isinstance(t_in, ops.EagerTensor):
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
xs_set)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x, unconnected_gradients) for x in xs]
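# Hedged usage sketch (not part of the original file): _GradientsHelper is the
# graph-mode workhorse behind the public tf.gradients API, e.g.
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_eager_execution()
#   x = tf.constant(3.0)
#   y = x * x
#   (dy_dx,) = tf.gradients(y, [x])   # builds the gradient subgraph
#   with tf.Session() as sess:
#       print(sess.run(dy_dx))        # 6.0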
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections_abc.Sequence):
if any(g is not None for g in out_grad):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
xs_set):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in _NonEagerInputs(op, xs_set):
pending_count[x.op] -= 1
ready = (pending_count[x.op] == 0)
if loop_state and not ready:
ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op)
if ready:
if control_flow_util.IsLoopExit(x.op):
# if x is an exit without real gradient, defer processing them.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_not_none_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_not_none_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_not_none_grad:
# For an unused exit, if it has trainable outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if backprop_util.IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_util.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _ZerosLike(t):
t_dtype = default_gradient.get_zeros_dtype(t)
if t.dtype == dtypes.resource:
return array_ops.zeros(
resource_variable_ops.variable_shape(t), dtype=t_dtype)
else:
return array_ops.zeros_like(t, dtype=t_dtype)
def _GetGrad(grads, t, unconnected_gradients):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
if unconnected_gradients == UnconnectedGradients.ZERO:
return _ZerosLike(t)
elif unconnected_gradients == UnconnectedGradients.NONE:
return None
else:
raise ValueError(
"Unknown value for unconnected_gradients: %r" % unconnected_gradients)
t_grad = op_grads[t.value_index]
# This can happen if some other output of `t.op` has non-None grad.
if unconnected_gradients == UnconnectedGradients.ZERO and t_grad is None:
return _ZerosLike(t)
assert not isinstance(
t_grad, list), ("gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join(x.name for x in out_grads if _FilterGrad(x)))
logging.vlog(1, " out --> %s",
", ".join(x.name for x in in_grads if _FilterGrad(x)))
def _MultiDeviceAddN(tensor_list, gradient_uid):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(tensors_on_device, key=DeviceKey):
tensors = tensors_on_device[dev]
with ops._colocate_with_for_gradient( # pylint: disable=protected-access
tensors[0].op,
gradient_uid,
ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
@tf_export("AggregationMethod")
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph.
The following aggregation methods are part of the stable API for
aggregating gradients:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op (see `tf.add_n`). This
method has the property that all gradients must be ready and
buffered separately in memory before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
The following aggregation methods are experimental and may not
be supported in future releases:
* `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using
the "AddN" op. This method of summing gradients may reduce
performance, but it can improve memory utilization because the
gradients can be released earlier.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
  EXPERIMENTAL_ACCUMULATE_N = 2  # Handled the same as EXPERIMENTAL_TREE in _AggregatedGrads.
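# Illustrative usage (an assumption about the public API): the method is
# chosen through the aggregation_method argument of tf.gradients, e.g.
#
#   grads = tf.gradients(
#       ys, xs, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)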
def _AggregatedGrads(grads,
op,
gradient_uid,
loop_state,
aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
gradient_uid: A unique identifier within the graph indicating
which invocation of gradients is being executed. Used to cluster
ops for compilation.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of gradients, one per each output of `op`. If the gradients
for a particular output is a list, this function aggregates it
before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
raise ValueError(
"Invalid aggregation_method specified %s." % aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_util.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections_abc.Sequence) and not all(
isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in out_grad
if g is not None)):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all(isinstance(g, ops.Tensor) for g in out_grad if g is not None):
tensor_shape = _AccumulatorShape(out_grad)
if aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid)
logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
tensor_shape, used)
else:
out_grads[i] = backprop.aggregate_indexed_slices_gradients(out_grad) # pylint: disable=protected-access
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
out_grads[i] = None
return out_grads
# Represents the output of TFE_Py_TapeSetPossibleGradientTypes. Real enums are
# unfortunately too slow to use here.
POSSIBLE_GRADIENT_TYPES_NONE = 0
POSSIBLE_GRADIENT_TYPES_FIRST_ORDER = 1
POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER = 2
def PossibleTapeGradientTypes(tensors):
"""Determines whether and how `args` may require tape gradients."""
return pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes(tensors)
|
frreiss/tensorflow-fred
|
tensorflow/python/ops/gradients_util.py
|
Python
|
apache-2.0
| 40,873
|
[
"VisIt"
] |
f26aecbefb7fb35333942ad97945cdea9ba2284d2ac866cb1ae8c41fe5408e02
|
#!/usr/bin/env python3
import torch
class _feature_flag(object):
_state = False
@classmethod
def on(cls):
return cls._state
@classmethod
def off(cls):
return not cls._state
@classmethod
def _set_state(cls, state):
cls._state = state
def __init__(self, state=True):
self.prev = self.__class__.on()
self.state = state
def __enter__(self):
self.__class__._set_state(self.state)
def __exit__(self, *args):
self.__class__._set_state(self.prev)
return False
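# Illustrative sketch (the subclass name below is hypothetical): _feature_flag
# subclasses are global on/off switches that restore the previous state on
# exit, so the context managers nest safely:
#
#   class my_flag(_feature_flag):
#       _state = True
#
#   with my_flag(False):
#       assert my_flag.off()
#   assert my_flag.on()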
class _value_context(object):
_global_value = None
@classmethod
def value(cls):
return cls._global_value
@classmethod
def _set_value(cls, value):
cls._global_value = value
def __init__(self, value):
self._orig_value = self.__class__.value()
self._instance_value = value
def __enter__(self,):
self.__class__._set_value(self._instance_value)
def __exit__(self, *args):
self.__class__._set_value(self._orig_value)
return False
class _dtype_value_context(object):
_global_float_value = None
_global_double_value = None
_global_half_value = None
@classmethod
def value(cls, dtype):
if torch.is_tensor(dtype):
dtype = dtype.dtype
if dtype == torch.float:
return cls._global_float_value
elif dtype == torch.double:
return cls._global_double_value
elif dtype == torch.half:
return cls._global_half_value
else:
raise RuntimeError(f"Unsupported dtype for {cls.__name__}.")
@classmethod
def _set_value(cls, float_value, double_value, half_value):
if float_value is not None:
cls._global_float_value = float_value
if double_value is not None:
cls._global_double_value = double_value
if half_value is not None:
cls._global_half_value = half_value
def __init__(self, float=None, double=None, half=None):
        self._orig_float_value = self.__class__.value(torch.float)
        self._instance_float_value = float
        self._orig_double_value = self.__class__.value(torch.double)
        self._instance_double_value = double
        self._orig_half_value = self.__class__.value(torch.half)
self._instance_half_value = half
def __enter__(self,):
self.__class__._set_value(self._instance_float_value, self._instance_double_value, self._instance_half_value)
def __exit__(self, *args):
self.__class__._set_value(self._orig_float_value, self._orig_double_value, self._orig_half_value)
return False
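# Usage sketch (illustrative): _dtype_value_context subclasses keep one value
# per floating-point dtype, selected by a tensor or a dtype, e.g.
#
#   assert min_variance.value(torch.double) == 1e-10
#   x = torch.randn(3)                    # float32 tensor
#   assert min_variance.value(x) == 1e-6  # the tensor's dtype is used
#   with min_variance(float=1e-4):
#       assert min_variance.value(torch.float) == 1e-4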
class _fast_covar_root_decomposition(_feature_flag):
r"""
This feature flag controls how matrix root decompositions (:math:`K = L L^\top`) are computed
(e.g. for sampling, computing caches, etc.).
If set to True, covariance matrices :math:`K` are decomposed with low-rank approximations :math:`L L^\top`,
(:math:`L \in \mathbb R^{n \times k}`) using the Lanczos algorithm.
This is faster for large matrices and exploits structure in the covariance matrix if applicable.
If set to False, covariance matrices :math:`K` are decomposed using the Cholesky decomposition.
.. warning ::
Setting this to False will compute a complete Cholesky decomposition of covariance matrices.
        This may be infeasible for GPs with structured covariance matrices.
See also: :class:`gpytorch.settings.max_root_decomposition_size` (to control the
size of the low rank decomposition used).
"""
_state = True
class _fast_log_prob(_feature_flag):
r"""
This feature flag controls how to compute the marginal log likelihood of exact GPs
and the log probability of multivariate normal distributions.
If set to True, log_prob is computed using a modified conjugate gradients algorithm (as
    described in `GPyTorch: Blackbox Matrix-Matrix Gaussian Process Inference with GPU Acceleration`_).
This is a stochastic computation, but it is much faster for large matrices
and exploits structure in the covariance matrix if applicable.
If set to False, `log_prob` is computed using the Cholesky decomposition.
.. warning ::
Setting this to False will compute a complete Cholesky decomposition of covariance matrices.
        This may be infeasible for GPs with structured covariance matrices.
See also: :class:`gpytorch.settings.num_trace_samples` (to control the
stochasticity of the fast `log_prob` estimates).
.. _GPyTorch: Blackbox Matrix-Matrix Gaussian Process Inference with GPU Acceleration:
https://arxiv.org/pdf/1809.11165.pdf
"""
_state = True
class _fast_solves(_feature_flag):
r"""
This feature flag controls how to compute solves with positive definite matrices.
If set to True, solves are computed using preconditioned conjugate gradients.
If set to False, `log_prob` is computed using the Cholesky decomposition.
.. warning ::
Setting this to False will compute a complete Cholesky decomposition of covariance matrices.
        This may be infeasible for GPs with structured covariance matrices.
"""
_state = True
class skip_posterior_variances(_feature_flag):
"""
Whether or not to skip the posterior covariance matrix when doing an ExactGP
forward pass. If this is on, the returned gpytorch MultivariateNormal will have a
ZeroLazyTensor as its covariance matrix. This allows gpytorch to not compute
the covariance matrix when it is not needed, speeding up computations.
"""
_state = False
class detach_test_caches(_feature_flag):
"""
Whether or not to detach caches computed for making predictions. In most cases, you will want this,
as this will speed up derivative computations of the predictions with respect to test inputs. However,
if you also need derivatives with respect to training inputs (e.g., because you have fantasy observations),
then you must disable this.
"""
_state = True
class deterministic_probes(_feature_flag):
"""
    Whether or not to reuse the same probe vectors across iterations of training. If True, we use the same set of
    probe vectors for computing log determinants each iteration. This introduces small amounts of bias into the MLL,
    but allows us to compute a deterministic estimate of it, which makes optimizers like L-BFGS more viable choices.
NOTE: Currently, probe vectors are cached in a global scope. Therefore, this setting cannot be used
if multiple independent GP models are being trained in the same context (i.e., it works fine with a single GP model)
"""
_state = False
probe_vectors = None
@classmethod
def _set_state(cls, state):
cls._state = state
cls.probe_vectors = None
class debug(_feature_flag):
"""
Whether or not to perform "safety" checks on the supplied data.
(For example, that the correct training data is supplied in Exact GP training mode)
    Pros of disabling: fewer data checks, fewer warning messages
    Cons of disabling: possibility of supplying incorrect data, or a model accidentally left in the wrong mode
"""
_state = True
class fast_pred_var(_feature_flag):
"""
Fast predictive variances using Lanczos Variance Estimates (LOVE)
Use this for improved performance when computing predictive variances.
As described in the paper:
`Constant-Time Predictive Distributions for Gaussian Processes`_.
See also: :class:`gpytorch.settings.max_root_decomposition_size` (to control the
size of the low rank decomposition used for variance estimates).
.. _`Constant-Time Predictive Distributions for Gaussian Processes`:
https://arxiv.org/pdf/1803.06058.pdf
"""
_num_probe_vectors = 1
@classmethod
def num_probe_vectors(cls):
return cls._num_probe_vectors
@classmethod
def _set_num_probe_vectors(cls, value):
cls._num_probe_vectors = value
def __init__(self, state=True, num_probe_vectors=1):
self.orig_value = self.__class__.num_probe_vectors()
self.value = num_probe_vectors
super(fast_pred_var, self).__init__(state)
def __enter__(self):
self.__class__._set_num_probe_vectors(self.value)
super(fast_pred_var, self).__enter__()
def __exit__(self, *args):
self.__class__._set_num_probe_vectors(self.orig_value)
return super(fast_pred_var, self).__exit__()
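# Usage sketch (illustrative): fast_pred_var layers an extra knob on top of
# the plain feature flag, e.g.
#
#   with fast_pred_var(num_probe_vectors=8):
#       pass  # LOVE variance estimates use 8 probe vectors in this block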
class fast_pred_samples(_feature_flag):
"""
Fast predictive samples using Lanczos Variance Estimates (LOVE).
Use this for improved performance when sampling from a predictive posterior matrix.
As described in the paper:
`Constant-Time Predictive Distributions for Gaussian Processes`_.
See also: :class:`gpytorch.settings.max_root_decomposition_size` (to control the
size of the low rank decomposition used for samples).
.. _`Constant-Time Predictive Distributions for Gaussian Processes`:
https://arxiv.org/pdf/1803.06058.pdf
"""
pass
class fast_computations(object):
r"""
This feature flag controls whether or not to use fast approximations to various mathematical
functions used in GP inference.
The functions that can be controlled are:
* :attr:`covar_root_decomposition`
This feature flag controls how matrix root decompositions
(:math:`K = L L^\top`) are computed (e.g. for sampling, computing caches, etc.).
* If set to True,
covariance matrices :math:`K` are decomposed with low-rank approximations :math:`L L^\top`,
(:math:`L \in \mathbb R^{n \times k}`) using the Lanczos algorithm.
This is faster for large matrices and exploits structure in the covariance matrix if applicable.
* If set to False,
covariance matrices :math:`K` are decomposed using the Cholesky decomposition.
* :attr:`log_prob`
This feature flag controls how GPyTorch computes the marginal log likelihood for exact GPs
and `log_prob` for multivariate normal distributions
* If set to True,
`log_prob` is computed using a modified conjugate gradients algorithm (as
        described in `GPyTorch Blackbox Matrix-Matrix Gaussian Process Inference with GPU Acceleration`_).
This is a stochastic computation, but it is much faster for large matrices
and exploits structure in the covariance matrix if applicable.
* If set to False,
`log_prob` is computed using the Cholesky decomposition.
* :attr:`fast_solves`
This feature flag controls how GPyTorch computes the solves of positive-definite matrices.
* If set to True,
Solves are computed with preconditioned conjugate gradients.
* If set to False,
Solves are computed using the Cholesky decomposition.
.. warning ::
Setting this to False will compute a complete Cholesky decomposition of covariance matrices.
        This may be infeasible for GPs with structured covariance matrices.
By default, approximations are used for all of these functions (except for solves).
Setting any of them to False will use exact computations instead.
See also:
* :class:`gpytorch.settings.max_root_decomposition_size`
(to control the size of the low rank decomposition used)
* :class:`gpytorch.settings.num_trace_samples`
(to control the stochasticity of the fast `log_prob` estimates)
.. _GPyTorch Blackbox Matrix-Matrix Gaussian Process Inference with GPU Acceleration:
https://arxiv.org/pdf/1809.11165.pdf
"""
covar_root_decomposition = _fast_covar_root_decomposition
log_prob = _fast_log_prob
solves = _fast_solves
def __init__(self, covar_root_decomposition=True, log_prob=True, solves=True):
self.covar_root_decomposition = _fast_covar_root_decomposition(covar_root_decomposition)
self.log_prob = _fast_log_prob(log_prob)
self.solves = _fast_solves(solves)
def __enter__(self):
self.covar_root_decomposition.__enter__()
self.log_prob.__enter__()
self.solves.__enter__()
def __exit__(self, *args):
self.covar_root_decomposition.__exit__()
self.log_prob.__exit__()
self.solves.__exit__()
return False
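# Usage sketch (illustrative): fast_computations bundles the three flags above
# so they can be toggled together, e.g.
#
#   with fast_computations(covar_root_decomposition=False, log_prob=False):
#       pass  # both fall back to Cholesky here, while solves stay fast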
class lazily_evaluate_kernels(_feature_flag):
"""
Lazily compute the entries of covariance matrices (set to True by default).
    This can result in memory and speed savings, e.g. if cross-covariance terms are not needed
    or if you only need to compute variances (not covariances).
If set to False, gpytorch will always compute the entire covariance matrix between
training and test data.
"""
_state = True
class max_eager_kernel_size(_value_context):
"""
If the joint train/test covariance matrix is less than this size, then we will avoid as
much lazy evaluation of the kernel as possible.
Default: 512
"""
_global_value = 512
class max_cg_iterations(_value_context):
"""
The maximum number of conjugate gradient iterations to perform (when computing
matrix solves). A higher value rarely results in more accurate solves -- instead, lower the CG tolerance.
Default: 1000
"""
_global_value = 1000
class min_variance(_dtype_value_context):
"""
    The minimum variance that can be returned from :obj:`~gpytorch.distributions.MultivariateNormal.variance`
    (defaults: 1e-6 for float, 1e-10 for double, 1e-3 for half). If variances are smaller than this, they are
    rounded up and a warning is raised.
"""
_global_float_value = 1e-6
_global_double_value = 1e-10
_global_half_value = 1e-3
class cholesky_jitter(_value_context):
"""
The jitter value passed to `psd_safe_cholesky` when using cholesky solves.
Default: None
"""
_global_value = None
class cg_tolerance(_value_context):
"""
Relative residual tolerance to use for terminating CG.
Default: 1
"""
_global_value = 1
class ciq_samples(_feature_flag):
"""
Whether to draw samples using CIQ or not
"""
_state = False
class preconditioner_tolerance(_value_context):
"""
Diagonal trace tolerance to use for checking preconditioner convergence.
Default: 1e-3
"""
_global_value = 1e-3
class eval_cg_tolerance(_value_context):
"""
Relative residual tolerance to use for terminating CG when making predictions.
Default: 0.01
"""
_global_value = 0.01
class _use_eval_tolerance(_feature_flag):
_state = False
class max_cholesky_size(_value_context):
"""
    If the size of a LazyTensor is less than `max_cholesky_size`,
then `root_decomposition` and `inv_matmul` of LazyTensor will use Cholesky rather than Lanczos/CG.
Default: 800
"""
_global_value = 800
class max_root_decomposition_size(_value_context):
"""
    The maximum number of Lanczos iterations to perform.
    This is used when 1) computing variance estimates, 2) drawing samples from MVNs,
    or 3) performing kernel multiplication.
    More iterations result in higher accuracy.
Default: 100
"""
_global_value = 100
class max_preconditioner_size(_value_context):
"""
The maximum size of preconditioner to use. 0 corresponds to turning
preconditioning off. When enabled, usually a value of around ~10 works fairly well.
Default: 15
"""
_global_value = 15
class max_lanczos_quadrature_iterations(_value_context):
r"""
The maximum number of Lanczos iterations to perform when doing stochastic
Lanczos quadrature. This is ONLY used for log determinant calculations and
computing Tr(K^{-1}dK/d\theta)
"""
_global_value = 20
class memory_efficient(_feature_flag):
"""
Whether or not to use Toeplitz math with gridded data, grid inducing point modules
Pros: memory efficient, faster on CPU
Cons: slower on GPUs with < 10000 inducing points
"""
_state = False
class min_preconditioning_size(_value_context):
"""
    If the size of a LazyTensor is less than `min_preconditioning_size`,
then we won't use pivoted Cholesky based preconditioning.
Default: 2000
"""
_global_value = 2000
class minres_tolerance(_value_context):
"""
Relative update term tolerance to use for terminating MINRES.
Default: 1e-4
"""
_global_value = 1e-4
class num_contour_quadrature(_value_context):
"""
The number of quadrature points to compute CIQ.
Default: 15
"""
_global_value = 15
class num_likelihood_samples(_value_context):
"""
The number of samples to draw from a latent GP when computing a likelihood
This is used in variational inference and training
Default: 10
"""
_global_value = 10
class num_gauss_hermite_locs(_value_context):
"""
    The number of Gauss-Hermite quadrature locations to use when computing expected
    log likelihoods. This is used in variational inference and training.
    Default: 20
"""
_global_value = 20
class num_trace_samples(_value_context):
"""
    The number of samples to draw when stochastically computing the trace of a matrix.
    More samples result in more accurate trace estimates.
    If the value is set to 0, then the trace will be computed deterministically.
    Default: 10
"""
_global_value = 10
class prior_mode(_feature_flag):
"""
If set to true, GP models will be evaluated in prior mode.
    This allows evaluating any Exact GP model in prior mode, even if it has training data / targets.
"""
_state = False
class skip_logdet_forward(_feature_flag):
"""
    .. warning::
ADVANCED FEATURE. Use this feature ONLY IF you're using
`gpytorch.mlls.MarginalLogLikelihood` as loss functions for optimizing
hyperparameters/variational parameters. DO NOT use this feature if you
need accurate estimates of the MLL (i.e. for model selection, MCMC,
        second order optimization methods, etc.)
This feature does not affect the gradients returned by
:meth:`gpytorch.distributions.MultivariateNormal.log_prob`
(used by `gpytorch.mlls.MarginalLogLikelihood`).
The gradients remain unbiased estimates, and therefore can be used with SGD.
However, the actual likelihood value returned by the forward
pass will skip certain computations (i.e. the logdet computation), and will therefore
be improper estimates.
    If you're using SGD (or a variant) to optimize parameters, you probably
don't need an accurate MLL estimate; you only need accurate gradients. So
this setting may give your model a performance boost.
"""
_state = False
class terminate_cg_by_size(_feature_flag):
"""
If set to true, cg will terminate after n iterations for an n x n matrix.
"""
_state = False
class trace_mode(_feature_flag):
"""
    If set to True, we will generally try to avoid calling our built-in PyTorch functions, because these cannot
be run through torch.jit.trace.
Note that this will sometimes involve explicitly evaluating lazy tensors and various other slowdowns and
inefficiencies. As a result, you really shouldn't use this feature context unless you are calling torch.jit.trace
on a GPyTorch model.
Our hope is that this flag will not be necessary long term, once https://github.com/pytorch/pytorch/issues/22329
is fixed.
"""
_state = False
class tridiagonal_jitter(_value_context):
"""
The (relative) amount of noise to add to the diagonal of tridiagonal matrices before
eigendecomposing. root_decomposition becomes slightly more stable with this, as we need
to take the square root of the eigenvalues. Any eigenvalues still negative after adding jitter
will be zeroed out.
"""
_global_value = 1e-6
class use_toeplitz(_feature_flag):
"""
Whether or not to use Toeplitz math with gridded data, grid inducing point modules
Pros: memory efficient, faster on CPU
Cons: slower on GPUs with < 10000 inducing points
"""
_state = True
|
jrg365/gpytorch
|
gpytorch/settings.py
|
Python
|
mit
| 20,168
|
[
"Gaussian"
] |
00c18c0fc65e1dd02e92f46b8bca9c7f03b4ab39f35a05e2299d6d1d0ec24446
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
from boto.utils import compute_hash
from boto.utils import get_utf8_value
class Key(S3Key):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`boto.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
super(Key, self).__init__(bucket=bucket, name=name)
self.generation = generation
        self.metageneration = None
self.cloud_hashes = {}
self.component_count = None
def __repr__(self):
if self.generation and self.metageneration:
ver_str = '#%s.%s' % (self.generation, self.metageneration)
else:
ver_str = ''
if self.bucket:
return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
else:
return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
self.metageneration = value
else:
setattr(self, name, value)
def handle_version_headers(self, resp, force=False):
self.metageneration = resp.getheader('x-goog-metageneration', None)
self.generation = resp.getheader('x-goog-generation', None)
def handle_addl_headers(self, headers):
for key, value in headers:
if key == 'x-goog-hash':
for hash_pair in value.split(','):
alg, b64_digest = hash_pair.strip().split('=', 1)
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
elif key == 'x-goog-generation':
self.generation = value
# Use x-goog-stored-content-encoding and
# x-goog-stored-content-length to indicate original content length
# and encoding, which are transcoding-invariant (so are preferable
# over using content-encoding and size headers).
elif key == 'x-goog-stored-content-encoding':
self.content_encoding = value
elif key == 'x-goog-stored-content-length':
self.size = int(value)
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
# For GCS we need to include the object generation in the query args.
# The rest of the processing is handled in the parent class.
if self.generation:
if query_args:
query_args += '&'
query_args += 'generation=%s' % self.generation
super(Key, self).open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries,
response_headers=response_headers)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None):
query_args = None
if self.generation:
query_args = ['generation=%s' % self.generation]
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=hash_algs,
query_args=query_args)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None,
hash_algs=None):
"""
Retrieve an object from GCS using the name of the Key object as the
key in GCS. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted from GCS and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
hash_algs=hash_algs)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers,
hash_algs=hash_algs)
def compute_hash(self, fp, algorithm, size=None):
"""
:type fp: file
:param fp: File pointer to the file to hash. The file
pointer will be reset to the same position before the
method returns.
:type algorithm: zero-argument constructor for hash objects that
implements update() and digest() (e.g. hashlib.md5)
:type size: int
        :param size: (optional) The maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_hash(
fp, size=size, hash_algorithm=algorithm)
# The internal implementation of compute_hash() needs to return the
# data size, but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code), so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
"""
Upload a file to GCS.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload,
            i.e. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
        :param size: (optional) The maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib.md5}.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size,
hash_algs=hash_algs)
def delete(self, headers=None):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation,
headers=headers)
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
"""
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
"""
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
"""
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None, rewind=False,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
        :param size: (optional) The maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
            from the file pointer. Fewer bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will be
rewound to the start before any bytes are read from
it. The default behaviour is False which reads from
the current position of the file pointer (fp).
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
:rtype: int
:return: The number of bytes written to the key.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
if res_upload_handler and size:
# could use size instead of file_length if provided but...
raise BotoClientError(
'"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if size:
self.size = size
else:
                # Even if md5 is provided, we still need the size, so
                # calculate it from the bytes remaining to the end of the content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
if md5 is None:
md5 = self.compute_md5(fp, size)
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size)
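    # Usage sketch (illustrative; the bucket/key names are hypothetical): the
    # if_generation parameter gives optimistic concurrency control, e.g. a
    # create-only write that fails if the object already exists:
    #
    #   key = bucket.new_key('config.json')
    #   key.set_contents_from_string('{}', if_generation=0)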
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=None,
res_upload_handler=None,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto GS
:type headers: dict
:param headers: Additional headers to pass along with the request to GS.
:type replace: bool
:param replace: If True, replaces the contents of the file if it
already exists.
:type cb: function
:param cb: (optional) a callback function that will be called to report
            progress on the upload. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to GS and the second representing
the total number of bytes that need to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed hashes, since we are setting the
# content.
self.local_hashes = {}
with open(filename, 'rb') as fp:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler,
if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
if_generation=None):
"""
Store an object in GCS using the name of the Key object as the
key in GCS and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
                        request to GCS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to GCS and the second representing the
                        total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in GCS.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed md5 hashes, since we are setting the content.
self.md5 = None
self.base64md5 = None
fp = StringIO.StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
fp.close()
return r
def set_contents_from_stream(self, *args, **kwargs):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
        Content-Length and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type size: int
        :param size: (optional) The maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
if_generation = kwargs.pop('if_generation', None)
if if_generation is not None:
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets the ACL for this object.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
"""Returns the ACL of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: :class:`.gs.acl.ACL`
"""
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
def get_xml_acl(self, headers=None, generation=None):
"""Returns the ACL string of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: str
"""
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
def set_xml_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_canned_acl(
acl_str,
self.name,
headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration
)
def compose(self, components, content_type=None, headers=None):
"""Create a new object from a sequence of existing objects.
The content of the object representing this Key will be the
concatenation of the given object sequence. For more detail, visit
https://developers.google.com/storage/docs/composite-objects
        :type components: list
        :param components: List of gs.Keys representing the component objects.
        :type content_type: string
        :param content_type: (optional) Content type for the new composite object.
"""
compose_req = []
for key in components:
if key.bucket.name != self.bucket.name:
raise BotoClientError(
'GCS does not support inter-bucket composing')
generation_tag = ''
if key.generation:
generation_tag = ('<Generation>%s</Generation>'
% str(key.generation))
compose_req.append('<Component><Name>%s</Name>%s</Component>' %
(key.name, generation_tag))
compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
''.join(compose_req))
headers = headers or {}
if content_type:
headers['Content-Type'] = content_type
resp = self.bucket.connection.make_request(
'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
headers=headers, query_args='compose',
data=get_utf8_value(compose_req_xml))
if resp.status < 200 or resp.status > 299:
raise self.bucket.connection.provider.storage_response_error(
resp.status, resp.reason, resp.read())
# Return the generation so that the result URI can be built with this
# for automatic parallel uploads.
return resp.getheader('x-goog-generation')
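# Usage sketch (illustrative; the connection, bucket, and object names are
# hypothetical): composing existing GCS objects with Key.compose() above:
#
#   bucket = conn.get_bucket('my-bucket')
#   dest = bucket.new_key('combined.log')
#   parts = [bucket.get_key('part-1.log'), bucket.get_key('part-2.log')]
#   generation = dest.compose(parts, content_type='text/plain')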
|
matthappens/taskqueue
|
taskqueue/venv_tq/lib/python2.7/site-packages/boto/gs/key.py
|
Python
|
mit
| 42,415
|
[
"VisIt"
] |
f5f22a1b54733c212399512bde0047feea8beaebcaec2eb8870bee3c7704fa0f
|
"""
Copyright (c) 2014 Brian Muller
Copyright (c) 2015 OpenBazaar
"""
import heapq
from operator import itemgetter
from protos import objects
class Node(object):
def __init__(self, node_id, ip=None, port=None, pubkey=None,
relay_node=None, nat_type=None, vendor=False):
self.id = node_id
self.ip = ip
self.port = port
self.pubkey = pubkey
self.relay_node = relay_node
self.nat_type = nat_type
self.vendor = vendor
self.long_id = long(node_id.encode('hex'), 16)
def getProto(self):
node_address = objects.Node.IPAddress()
node_address.ip = self.ip
node_address.port = self.port
n = objects.Node()
n.guid = self.id
n.publicKey = self.pubkey
n.natType = self.nat_type
n.nodeAddress.MergeFrom(node_address)
n.vendor = self.vendor
if self.relay_node is not None:
relay_address = objects.Node.IPAddress()
relay_address.ip = self.relay_node[0]
relay_address.port = self.relay_node[1]
n.relayAddress.MergeFrom(relay_address)
return n
def sameHomeAs(self, node):
return self.ip == node.ip and self.port == node.port
def distanceTo(self, node):
"""
Get the distance between this node and another.
"""
return self.long_id ^ node.long_id
def __iter__(self):
"""
Enables use of Node as a tuple - i.e., tuple(node) works.
"""
return iter([self.id, self.ip, self.port])
def __repr__(self):
return repr([self.long_id, self.ip, self.port])
def __str__(self):
return "%s:%s" % (self.ip, str(self.port))
class NodeHeap(object):
"""
A heap of nodes ordered by distance to a given node.
"""
def __init__(self, node, maxsize):
"""
Constructor.
@param node: The node to measure all distances from.
@param maxsize: The maximum size that this heap can grow to.
"""
self.node = node
self.heap = []
self.contacted = set()
self.maxsize = maxsize
def remove(self, peerIDs):
"""
Remove a list of peer ids from this heap. Note that while this
        heap retains a constant visible size (based on the iterator), its
actual size may be quite a bit larger than what's exposed. Therefore,
removal of nodes may not change the visible size as previously added
nodes suddenly become visible.
"""
peerIDs = set(peerIDs)
if len(peerIDs) == 0:
return
nheap = []
for distance, node in self.heap:
if node.id not in peerIDs:
heapq.heappush(nheap, (distance, node))
self.heap = nheap
def getNodeById(self, node_id):
for _, node in self.heap:
if node.id == node_id:
return node
return None
def allBeenContacted(self):
return len(self.getUncontacted()) == 0
def getIDs(self):
return [n.id for n in self]
def markContacted(self, node):
self.contacted.add(node.id)
def popleft(self):
if len(self) > 0:
return heapq.heappop(self.heap)[1]
return None
def push(self, nodes):
"""
Push nodes onto heap.
@param nodes: This can be a single item or a C{list}.
"""
if not isinstance(nodes, list):
nodes = [nodes]
for node in nodes:
if node not in self:
distance = self.node.distanceTo(node)
heapq.heappush(self.heap, (distance, node))
def __len__(self):
return min(len(self.heap), self.maxsize)
def __iter__(self):
nodes = heapq.nsmallest(self.maxsize, self.heap)
return iter(map(itemgetter(1), nodes))
def __contains__(self, node):
# pylint: disable=unused-variable
for distance, n in self.heap:
if node.id == n.id:
return True
return False
def getUncontacted(self):
return [n for n in self if n.id not in self.contacted]
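# Usage sketch (illustrative; ids are 20-byte strings, Python 2 like the
# module above): NodeHeap keeps peers ordered by Kademlia XOR distance
# from a reference node, e.g.
#
#   target = Node('\x00' * 20)
#   heap = NodeHeap(target, maxsize=2)
#   heap.push([Node('\x01' + '\x00' * 19), Node('\xff' + '\x00' * 19)])
#   nearest = list(heap)  # the maxsize closest peers, nearest first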
|
saltduck/OpenBazaar-Server
|
dht/node.py
|
Python
|
mit
| 4,175
|
[
"Brian"
] |
82b7369279f3637d8fbcb1097c390eae2dcec2b7498af0099c49bc609671f5e1
|
#!/usr/bin/env python3
# coding:utf-8
# Based on GAppProxy 2.0.0 by Du XiaoGang <dugang.2008@gmail.com>
# Based on WallProxy 0.4.0 by Hust Moon <www.ehust@gmail.com>
# Contributor:
# Phus Lu <phus.lu@gmail.com>
# Hewig Xu <hewigovens@gmail.com>
# Ayanamist Yang <ayanamist@gmail.com>
# V.E.O <V.E.O@tom.com>
# Max Lv <max.c.lv@gmail.com>
# AlsoTang <alsotang@gmail.com>
# Christopher Meng <cickumqt@gmail.com>
# Yonsm Guo <YonsmGuo@gmail.com>
# Parkman <cseparkman@gmail.com>
# Ming Bai <mbbill@gmail.com>
# Bin Yu <yubinlove1991@gmail.com>
# lileixuan <lileixuan@gmail.com>
# Cong Ding <cong@cding.org>
# Zhang Youfu <zhangyoufu@gmail.com>
# Lu Wei <luwei@barfoo>
# Harmony Meow <harmony.meow@gmail.com>
# logostream <logostream@gmail.com>
# Rui Wang <isnowfy@gmail.com>
# Wang Wei Qiang <wwqgtxx@gmail.com>
# Felix Yan <felixonmars@gmail.com>
# QXO <qxodream@gmail.com>
# Geek An <geekan@foxmail.com>
# Poly Rabbit <mcx_221@foxmail.com>
# oxnz <yunxinyi@gmail.com>
# Shusen Liu <liushusen.smart@gmail.com>
# Yad Smood <y.s.inside@gmail.com>
# Chen Shuang <cs0x7f@gmail.com>
# cnfuyu <cnfuyu@gmail.com>
# cuixin <steven.cuixin@gmail.com>
import sys
import os
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
data_path = os.path.join(root_path, 'data')
data_gae_proxy_path = os.path.join(data_path, 'gae_proxy')
gae_path = os.path.join(root_path, "gae_proxy")
sys.path.append(gae_path)
noarch_lib = os.path.join(root_path, 'lib', 'noarch')
sys.path.append(noarch_lib)
common_lib = os.path.join(root_path, 'lib', 'common')
sys.path.append(common_lib)
if sys.platform == "win32":
win32_lib = os.path.join(root_path, 'lib', 'win32')
sys.path.append(win32_lib)
elif sys.platform.startswith("linux"):
linux_lib = os.path.join(root_path, 'lib', 'linux')
sys.path.append(linux_lib)
elif sys.platform == "darwin":
darwin_lib = os.path.join(root_path, 'lib', 'darwin')
sys.path.append(darwin_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
sys.path.append(extra_lib)
import time
import traceback
import platform
import random
import threading
import urllib.request, urllib.error, urllib.parse
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
__file__ = getattr(os, 'readlink', lambda x: x)(__file__)
work_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(work_path)
def create_data_path():
if not os.path.isdir(data_path):
os.mkdir(data_path)
if not os.path.isdir(data_gae_proxy_path):
os.mkdir(data_gae_proxy_path)
create_data_path()
from local.config import config
from xlog import getLogger
xlog = getLogger("gae_proxy")
xlog.set_buffer(500)
if config.log_file:
log_file = os.path.join(data_gae_proxy_path, "local.log")
xlog.set_file(log_file)
from local.cert_util import CertUtil
from local import pac_server
import simple_http_server
from local import proxy_handler
from local import connect_control
from local import env_info
from local import connect_manager
from local.gae_handler import spawn_later
from local import web_control
# launcher/module_init will check this value for start/stop finished
ready = False
def pre_start():
def get_windows_running_process_list():
import os
import glob
import ctypes
import collections
Process = collections.namedtuple('Process', 'pid name exe')
process_list = []
if os.name == 'nt':
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010
lpidProcess= (ctypes.c_ulong * 1024)()
cb = ctypes.sizeof(lpidProcess)
cbNeeded = ctypes.c_ulong()
ctypes.windll.psapi.EnumProcesses(ctypes.byref(lpidProcess), cb, ctypes.byref(cbNeeded))
            nReturned = cbNeeded.value // ctypes.sizeof(ctypes.c_ulong())
pidProcess = [i for i in lpidProcess][:nReturned]
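            # QueryFullProcessImageNameA exists on Windows Vista and later; older Windows falls back to GetModuleFileNameExA.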
has_queryimage = hasattr(ctypes.windll.kernel32, 'QueryFullProcessImageNameA')
for pid in pidProcess:
hProcess = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 0, pid)
if hProcess:
modname = ctypes.create_string_buffer(2048)
count = ctypes.c_ulong(ctypes.sizeof(modname))
if has_queryimage:
ctypes.windll.kernel32.QueryFullProcessImageNameA(hProcess, 0, ctypes.byref(modname), ctypes.byref(count))
else:
ctypes.windll.psapi.GetModuleFileNameExA(hProcess, 0, ctypes.byref(modname), ctypes.byref(count))
exe = modname.value
name = os.path.basename(exe)
process_list.append(Process(pid=pid, name=name, exe=exe))
ctypes.windll.kernel32.CloseHandle(hProcess)
elif sys.platform.startswith('linux'):
for filename in glob.glob('/proc/[0-9]*/cmdline'):
pid = int(filename.split('/')[2])
exe_link = '/proc/%d/exe' % pid
if os.path.exists(exe_link):
exe = os.readlink(exe_link)
name = os.path.basename(exe)
process_list.append(Process(pid=pid, name=name, exe=exe))
else:
try:
import psutil
process_list = psutil.get_process_list()
except Exception as e:
                xlog.exception('psutil.get_process_list() failed: %r', e)
return process_list
if sys.platform == 'cygwin':
xlog.info('cygwin is not officially supported, please continue at your own risk :)')
#sys.exit(-1)
elif os.name == 'posix':
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, -1))
except Exception as e:
pass
elif os.name == 'nt':
import ctypes
ctypes.windll.kernel32.SetConsoleTitleW('GoAgent ')
if not config.LISTEN_VISIBLE:
ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)
else:
ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 1)
if config.LOVE_ENABLE and random.randint(1, 100) <= 5:
title = ctypes.create_unicode_buffer(1024)
ctypes.windll.kernel32.GetConsoleTitleW(ctypes.byref(title), len(title)-1)
ctypes.windll.kernel32.SetConsoleTitleW('%s %s' % (title.value, random.choice(config.LOVE_TIP)))
blacklist = {'360safe': False,
'QQProtect': False, }
softwares = [k for k, v in blacklist.items() if v]
if softwares:
tasklist = '\n'.join(x.name for x in get_windows_running_process_list()).lower()
softwares = [x for x in softwares if x.lower() in tasklist]
if softwares:
                title = 'GoAgent Notice'
                error = 'Some security software (e.g. %s) may conflict with GoAgent and cause high CPU usage.\nIf this happens, consider temporarily quitting the security software so GoAgent can keep running.' % ','.join(softwares)
ctypes.windll.user32.MessageBoxW(None, error, title, 0)
#sys.exit(0)
if config.PAC_ENABLE:
pac_ip = config.PAC_IP
url = 'http://%s:%d/%s' % (pac_ip, config.PAC_PORT, config.PAC_FILE)
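        # Pre-fetch the PAC URL after 10 minutes, bypassing any system proxy.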
spawn_later(600, urllib.request.build_opener(urllib.request.ProxyHandler({})).open, url)
def log_info():
xlog.info('------------------------------------------------------')
xlog.info('Python Version : %s', platform.python_version())
xlog.info('OS : %s', env_info.os_detail())
xlog.info('Listen Address : %s:%d', config.LISTEN_IP, config.LISTEN_PORT)
if config.CONTROL_ENABLE:
xlog.info('Control Address : %s:%d', config.CONTROL_IP, config.CONTROL_PORT)
if config.PROXY_ENABLE:
xlog.info('%s Proxy : %s:%s', config.PROXY_TYPE, config.PROXY_HOST, config.PROXY_PORT)
xlog.info('GAE APPID : %s', '|'.join(config.GAE_APPIDS))
if config.PAC_ENABLE:
xlog.info('Pac Server : http://%s:%d/%s', config.PAC_IP, config.PAC_PORT, config.PAC_FILE)
#info += 'Pac File : file://%s\n' % os.path.join(self.DATA_PATH, self.PAC_FILE)
xlog.info('------------------------------------------------------')
def main():
global ready
connect_control.keep_running = True
config.load()
connect_manager.https_manager.load_config()
xlog.debug("## GAEProxy set keep_running: %s", connect_control.keep_running)
    # To profile gae_proxy: run proxy.py, browse some pages through the proxy, then visit http://127.0.0.1:8084/quit to quit and print the results.
do_profile = False
if do_profile:
import cProfile, pstats
pr = cProfile.Profile()
pr.enable()
global __file__
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
__file__ = getattr(os, 'readlink', lambda x: x)(__file__)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
#xlog.basicConfig(level=xlog.DEBUG if config.LISTEN_DEBUGINFO else xlog.INFO, format='%(levelname)s - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')
pre_start()
log_info()
CertUtil.init_ca()
proxy_daemon = simple_http_server.HTTPServer((config.LISTEN_IP, config.LISTEN_PORT), proxy_handler.GAEProxyHandler)
proxy_thread = threading.Thread(target=proxy_daemon.serve_forever)
proxy_thread.setDaemon(True)
proxy_thread.start()
if config.PAC_ENABLE:
pac_daemon = simple_http_server.HTTPServer((config.PAC_IP, config.PAC_PORT), pac_server.PACServerHandler)
pac_thread = threading.Thread(target=pac_daemon.serve_forever)
pac_thread.setDaemon(True)
pac_thread.start()
ready = True # checked by launcher.module_init
while connect_control.keep_running:
time.sleep(1)
xlog.info("Exiting gae_proxy module...")
proxy_daemon.shutdown()
proxy_daemon.server_close()
proxy_thread.join()
if config.PAC_ENABLE:
pac_daemon.shutdown()
pac_daemon.server_close()
pac_thread.join()
ready = False # checked by launcher.module_init
xlog.debug("## GAEProxy set keep_running: %s", connect_control.keep_running)
if do_profile:
pr.disable()
pr.print_stats()
# called by launcher/module/stop
def terminate():
xlog.info("start to terminate GAE_Proxy")
connect_control.keep_running = False
xlog.debug("## Set keep_running: %s", connect_control.keep_running)
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc(file=sys.stdout)
except KeyboardInterrupt:
terminate()
sys.exit()
|
Suwmlee/XX-Net
|
gae_proxy/local/proxy.py
|
Python
|
bsd-2-clause
| 11,261
|
[
"VisIt"
] |
90b489de07f3d02de70f2455c4a8ce3a387116f0bd80c9348d072926b0b2d206
|
#!/home/paulk/software/bin/python
from __future__ import division
from sys import argv,exit,stderr
from key_functions import *
import pysam
from cPickle import load
f = open("resources/exons.hg19.Ens66.0-based.pic")
exons = load(f)
f.close()
print "exon\tincl\texcl\ttotal\tupintrlen\texonlen\tdownintrlen"
f = open("test.out")
c = 0
for row in f:
if c > 20: break
l = row.strip().split('\t')
tx,ex = l[0].split(':')
exon = int(ex)
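    # Look up the previous and next exons to measure the flanking intron lengths.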
bexon_reg = exons[tx+":"+str(exon-1)]
exon_reg = exons[l[0]]
aexon_reg = exons[tx+":"+str(exon+1)]
bintron_length = simple_intron_length(bexon_reg,exon_reg)
chrom,st_sp,sd = exon_reg.split(':')
e = map(int,st_sp.split('-'))
exon_length = abs(e[1] - e[0]) + 1
aintron_length = simple_intron_length(exon_reg,aexon_reg)
    print row.strip()+"\t"+str(bintron_length)+"\t"+str(exon_length)+"\t"+str(aintron_length)
    c += 1
f.close()
|
polarise/RP-python
|
intronLen2ExonInclRate.py
|
Python
|
gpl-2.0
| 862
|
[
"pysam"
] |
27eab0a1f4f01a7327bcc3d4a95d5223e979019915ff7217709846392aa9502d
|
# -----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.io import RecordError
from skbio.parse.record import DelimitedSplitter
import warnings
def _label_line_parser(record, splitter, strict=True):
"""Returns dict mapping list of data to labels, plus list with field order.
Field order contains labels in order encountered in file.
NOTE: doesn't care if lines are out of order in different blocks. This
should never happen anyway, but it's possible that this behavior should
be changed to tighten up validation.
"""
labels = []
result = {}
for line in record:
try:
key, val = splitter(line.rstrip())
        except Exception:
if strict:
raise RecordError(
"Failed to extract key and value from line %s" %
line)
else:
continue # just skip the line if not strict
if key in result:
result[key].append(val)
else:
result[key] = [val]
labels.append(key)
return result, labels
def _is_clustal_seq_line(line):
"""Returns True if line starts with a non-blank character but not 'CLUSTAL'
Useful for filtering other lines out of the file.
"""
return line and (not line[0].isspace()) and\
(not line.startswith('CLUSTAL')) and (not line.startswith('MUSCLE'))
last_space = DelimitedSplitter(None, -1)
def _delete_trailing_number(line):
"""Deletes trailing number from a line.
WARNING: does not preserve internal whitespace when a number is removed!
(converts each whitespace run to a single space). Returns the original
line if it didn't end in a number.
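    For example, 'seq1 ACGU 60' becomes 'seq1 ACGU'.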
"""
pieces = line.split()
try:
int(pieces[-1])
return ' '.join(pieces[:-1])
except ValueError: # no trailing numbers
return line
def write_clustal(records, fh):
warnings.warn(
"write_clustal is deprecated and will be removed in "
"scikit-bio 0.3.0. Please update your code to use Alignment.write.",
DeprecationWarning)
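    # Clustal alignments are conventionally written in 60-column blocks.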
clen = 60
records = list(records)
names, seqs = zip(*records)
nameLen = max(map(len, names))
seqLen = max(map(len, seqs))
fh.write('CLUSTAL\n\n')
for i in range(0, seqLen, clen):
for label, seq in records:
            name = '{:<{width}}'.format(label, width=nameLen)
fh.write("%s\t%s\t\n" % (name, seq[i:i+clen]))
fh.write("\n")
def parse_clustal(record, strict=True):
warnings.warn(
"parse_clustal is deprecated and will be removed in "
"scikit-bio 0.3.0. Please update your code to use Alignment.read.",
DeprecationWarning)
records = map(_delete_trailing_number,
filter(_is_clustal_seq_line, record))
data, labels = _label_line_parser(records, last_space, strict)
for key in labels:
yield key, ''.join(data[key])
|
Kleptobismol/scikit-bio
|
skbio/parse/sequences/clustal.py
|
Python
|
bsd-3-clause
| 3,294
|
[
"scikit-bio"
] |
ba6e1e6643a1dc091eec83770bcfdff4164f97a706ced4911b116f9a2e3b2703
|
"""
/******************************************************************************
This source file is part of the Avogadro project.
Copyright 2013 Kitware, Inc.
This source code is released under the New BSD License, (the "License").
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
"""
import argparse
import json
import sys
# Some globals:
targetName = 'GAMESS-UK'
debug = False
def getOptions():
userOptions = {}
userOptions['Title'] = {}
userOptions['Title']['type'] = 'string'
userOptions['Title']['default'] = ''
userOptions['Calculation Type'] = {}
userOptions['Calculation Type']['type'] = "stringList"
userOptions['Calculation Type']['default'] = 1
userOptions['Calculation Type']['values'] = \
['Single Point', 'Equilibrium Geometry', 'Frequencies', 'Transition State']
userOptions['Theory'] = {}
userOptions['Theory']['type'] = "stringList"
userOptions['Theory']['default'] = 2
userOptions['Theory']['values'] = \
['RHF', 'MP2', 'B3LYP', 'BLYP', 'SVWN', 'B97', 'HCTH', 'FT97']
userOptions['Basis'] = {}
userOptions['Basis']['type'] = "stringList"
userOptions['Basis']['default'] = 2
userOptions['Basis']['values'] = \
['STO-3G', '3-21G', '6-31G', '6-31G(d)', 'cc-pVDZ', 'cc-pVTZ']
userOptions['Filename Base'] = {}
userOptions['Filename Base']['type'] = 'string'
userOptions['Filename Base']['default'] = 'job'
userOptions['Charge'] = {}
userOptions['Charge']['type'] = "integer"
userOptions['Charge']['default'] = 0
userOptions['Charge']['minimum'] = -9
userOptions['Charge']['maximum'] = 9
userOptions['Multiplicity'] = {}
userOptions['Multiplicity']['type'] = "integer"
userOptions['Multiplicity']['default'] = 1
userOptions['Multiplicity']['minimum'] = 1
userOptions['Multiplicity']['maximum'] = 6
# TODO Coordinate format (need zmatrix)
userOptions['Direct SCF Mode'] = {}
userOptions['Direct SCF Mode']['type'] = 'boolean'
userOptions['Direct SCF Mode']['default'] = False
opts = {'userOptions' : userOptions}
return opts
def generateInputFile(opts):
# Extract options:
title = opts['Title']
calculate = opts['Calculation Type']
theory = opts['Theory']
basis = opts['Basis']
charge = opts['Charge']
multiplicity = opts['Multiplicity']
directScf = opts['Direct SCF Mode']
# Convert to code-specific strings
calcStr = ''
if calculate == 'Single Point':
calcStr = 'scf'
elif calculate == 'Equilibrium Geometry':
calcStr = 'optxyz' # TODO If we add zmatrix, this will need updating.
elif calculate == 'Frequencies':
calcStr = 'hessian'
elif calculate == 'Transition State':
calcStr = 'saddle'
else:
raise Exception('Unhandled calculation type: %s'%calculate)
theoryStr = ''
if theory in ['RHF', 'MP2']:
theoryStr = 'scftype '
if directScf:
theoryStr += 'direct '
theoryStr += theory.lower()
elif theory in ['B3LYP', 'BLYP', 'SVWN', 'B97', 'HCTH', 'FT97']:
if directScf:
theoryStr += 'scftype direct\n'
theoryStr += 'dft %s'%theory.lower()
else:
raise Exception('Unhandled theory type: %s'%theory)
basisStr = ''
if basis == 'STO-3G':
basisStr = 'sto3g'
elif basis == '6-31G(d)':
basisStr = '6-31G*'
elif basis in ['3-21G', '6-31G', 'cc-pVDZ', 'cc-pVTZ']:
basisStr = basis
else:
raise Exception('Unhandled basis type: %s'%basis)
output = ''
# Copied from 1.x extension. Seems useful.
output += '# This file was generated by Avogadro\n'
output += '# For more GAMESS-UK input options consult the manual at:\n'
output += '# http://www.cfs.dl.ac.uk/docs/index.shtml\n\n'
output += 'title\n%s\n\n'%title
if calculate in ['Equilibrium Geometry', 'Transition State']:
output += '# Ensure orbital vectors printed after optimization:\n'
output += 'iprint vectors\n\n'
output += 'mult %d\ncharge %d\n\n'%(multiplicity, charge)
output += 'geometry angstrom'
if calculate == 'Transition State':
output += ' all'
output += '\n'
output += '$$coords:_xyzZS$$\nend\n\n'
output += 'basis %s\n\n'%basisStr
output += 'runtype %s\n'%calcStr
output += '%s\n\n'%theoryStr
output += 'enter\n'
return output
def generateInput():
# Read options from stdin
stdinStr = sys.stdin.read()
# Parse the JSON strings
opts = json.loads(stdinStr)
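    # Expected shape (abridged): {"options": {"Title": ..., "Calculation Type": ..., "Filename Base": ...}}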
# Generate the input file
inp = generateInputFile(opts['options'])
# Basename for input files:
baseName = opts['options']['Filename Base']
# Prepare the result
result = {}
# Input file text -- will appear in the same order in the GUI as they are
# listed in the array:
files = []
files.append({'filename': '%s.gukin'%baseName, 'contents': inp})
if debug:
files.append({'filename': 'debug_info', 'contents': stdinStr})
result['files'] = files
# Specify the main input file. This will be used by MoleQueue to determine
# the value of the $$inputFileName$$ and $$inputFileBaseName$$ keywords.
result['mainFile'] = '%s.gukin'%baseName
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser('Generate a %s input file.'%targetName)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--print-options', action='store_true')
parser.add_argument('--generate-input', action='store_true')
parser.add_argument('--display-name', action='store_true')
args = vars(parser.parse_args())
debug = args['debug']
if args['display_name']:
print(targetName)
if args['print_options']:
print(json.dumps(getOptions()))
elif args['generate_input']:
print(json.dumps(generateInput()))
|
cjh1/mongochemweb-avogadrolibs
|
avogadro/qtplugins/quantuminput/inputGenerators/gamessuk.py
|
Python
|
bsd-3-clause
| 5,941
|
[
"Avogadro",
"GAMESS"
] |
b8c63a84d8e8b965ad0c7659f792f15e4c04de8f382209bf2e0e1ef318d1583b
|
#!/usr/bin/env python
from pymatgen.io.vasp import Poscar
import cProfile
import pstats
import os
import logging
logging.basicConfig(level=logging.DEBUG)
p = Poscar.from_file("../test_files/POSCAR.LiFePO4", check_for_POTCAR=False)
s = p.structure
def test():
nn = s.get_all_neighbors(20)
print len(nn)
def chgcar_test():
from pymatgen.io.vasp import Chgcar
c = Chgcar.from_file("../test_files/CHGCAR.noncubic")
print c.get_integrated_diff(1, 2.5, 3)
def vasprun_test():
from pymatgen.io.vasp import Vasprun
v = Vasprun("../test_files/vasprun.xml")
print v.final_energy
def matcher_test():
p = Poscar.from_file("../test_files/POSCAR.Li2O")
s = p.structure
from pymatgen.analysis.structure_matcher import StructureMatcher
print StructureMatcher().fit(s, s)
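# Profile the matcher, print the 20 most expensive calls by cumulative time, then remove the profile file.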
cProfile.run('matcher_test()', 'testprof')
p = pstats.Stats('testprof')
p.sort_stats('cumulative').print_stats(20)
os.remove("testprof")
|
matk86/pymatgen
|
dev_scripts/profile_structure.py
|
Python
|
mit
| 949
|
[
"VASP",
"pymatgen"
] |
008853eb1ae304d90cde113b63c6dae1af2f1d66502727103d85bc77913348ef
|
import click
import os
import requests
import sys
import json
from soccer import leagueids
from soccer.exceptions import IncorrectParametersException, APIErrorException
from soccer.writers import get_writer
BASE_URL = 'http://api.football-data.org/alpha/'
LIVE_URL = 'http://soccer-cli.appspot.com/'
LEAGUE_IDS = leagueids.LEAGUE_IDS
def load_json(file):
"""Load JSON file at app start"""
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, file)) as jfile:
data = json.load(jfile)
return data
TEAM_DATA = load_json("teams.json")["teams"]
TEAM_NAMES = {team["code"]: team["id"] for team in TEAM_DATA}
def get_input_key():
"""Input API key and validate"""
click.secho("No API key found!", fg="yellow", bold=True)
click.secho("Please visit {0} and get an API token.".format(BASE_URL),
fg="yellow", bold=True)
while True:
confkey = click.prompt(click.style("Enter API key",
fg="yellow", bold=True))
if len(confkey) == 32: # 32 chars
try:
int(confkey, 16) # hexadecimal
except ValueError:
click.secho("Invalid API key", fg="red", bold=True)
else:
break
else:
click.secho("Invalid API key", fg="red", bold=True)
return confkey
def load_config_key():
"""Load API key from config file, write if needed"""
global api_token
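    # Prefer the SOCCER_CLI_API_TOKEN environment variable; fall back to ~/soccer-cli.ini.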
try:
api_token = os.environ['SOCCER_CLI_API_TOKEN']
except KeyError:
home = os.path.expanduser("~")
config = os.path.join(home, "soccer-cli.ini")
if not os.path.exists(config):
with open(config, "w") as cfile:
key = get_input_key()
cfile.write(key)
else:
with open(config, "r") as cfile:
key = cfile.read()
if key:
api_token = key
else:
os.remove(config) # remove 0-byte file
click.secho('No API Token detected. '
'Please visit {0} and get an API Token, '
'which will be used by Soccer CLI '
'to get access to the data.'
.format(BASE_URL), fg="red", bold=True)
sys.exit(1)
return api_token
def _get(url):
"""Handles api.football-data.org requests"""
req = requests.get(BASE_URL+url, headers=headers)
if req.status_code == requests.codes.ok:
return req
if req.status_code == requests.codes.bad:
raise APIErrorException('Invalid request. Check parameters.')
if req.status_code == requests.codes.forbidden:
raise APIErrorException('This resource is restricted')
if req.status_code == requests.codes.not_found:
raise APIErrorException('This resource does not exist. Check parameters')
    if req.status_code == requests.codes.too_many_requests:
raise APIErrorException('You have exceeded your allowed requests per minute/day')
def get_live_scores(writer, use_12_hour_format):
"""Gets the live scores"""
req = requests.get(LIVE_URL)
if req.status_code == requests.codes.ok:
scores = req.json()
if len(scores["games"]) == 0:
click.secho("No live action currently", fg="red", bold=True)
return
writer.live_scores(scores, use_12_hour_format)
else:
click.secho("There was problem getting live scores", fg="red", bold=True)
def get_team_scores(team, time, writer, show_upcoming, use_12_hour_format):
"""Queries the API and gets the particular team scores"""
team_id = TEAM_NAMES.get(team, None)
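    # The API's timeFrame parameter: 'n<days>' = next (upcoming) fixtures, 'p<days>' = past fixtures.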
time_frame = 'n' if show_upcoming else 'p'
if team_id:
try:
req = _get('teams/{team_id}/fixtures?timeFrame={time_frame}{time}'.format(
team_id=team_id, time_frame=time_frame, time=time))
team_scores = req.json()
if len(team_scores["fixtures"]) == 0:
click.secho("No action during past week. Change the time "
"parameter to get more fixtures.", fg="red", bold=True)
else:
writer.team_scores(team_scores, time, show_upcoming, use_12_hour_format)
except APIErrorException as e:
click.secho(e.args[0],
fg="red", bold=True)
else:
click.secho("Team code is not correct.",
fg="red", bold=True)
def get_standings(league, writer):
"""Queries the API and gets the standings for a particular league"""
league_id = LEAGUE_IDS[league]
try:
req = _get('soccerseasons/{id}/leagueTable'.format(
id=league_id))
writer.standings(req.json(), league)
except APIErrorException:
# Click handles incorrect League codes so this will only come up
# if that league does not have standings available. ie. Champions League
click.secho("No standings availble for {league}.".format(league=league),
fg="red", bold=True)
def get_league_scores(league, time, writer, show_upcoming, use_12_hour_format):
"""
Queries the API and fetches the scores for fixtures
based upon the league and time parameter
"""
time_frame = 'n' if show_upcoming else 'p'
if league:
try:
league_id = LEAGUE_IDS[league]
req = _get('soccerseasons/{id}/fixtures?timeFrame={time_frame}{time}'.format(
id=league_id, time_frame=time_frame, time=str(time)))
fixtures_results = req.json()
# no fixtures in the past week. display a help message and return
if len(fixtures_results["fixtures"]) == 0:
click.secho("No {league} matches in the past week.".format(league=league),
fg="red", bold=True)
return
writer.league_scores(fixtures_results, time, show_upcoming, use_12_hour_format)
except APIErrorException:
click.secho("No data for the given league.", fg="red", bold=True)
else:
# When no league specified. Print all available in time frame.
try:
req = _get('fixtures?timeFrame={time_frame}{time}'.format(
time_frame=time_frame, time=str(time)))
fixtures_results = req.json()
writer.league_scores(fixtures_results, time, show_upcoming, use_12_hour_format)
except APIErrorException:
click.secho("No data available.", fg="red", bold=True)
def get_team_players(team, writer):
"""
Queries the API and fetches the players
for a particular team
"""
team_id = TEAM_NAMES.get(team, None)
try:
req = _get('teams/{team_id}/players'.format(
team_id=team_id))
team_players = req.json()
if int(team_players["count"]) == 0:
click.secho("No players found for this team", fg="red", bold=True)
else:
writer.team_players(team_players)
except APIErrorException:
click.secho("No data for the team. Please check the team code.",
fg="red", bold=True)
def map_team_id(code):
"""Take in team ID, read JSON file to map ID to name"""
for team in TEAM_DATA:
if team["code"] == code:
click.secho(team["name"], fg="green")
break
else:
click.secho("No team found for this code", fg="red", bold=True)
def list_team_codes():
"""List team names in alphabetical order of team ID, per league."""
# Sort teams by league, then alphabetical by code
cleanlist = sorted(TEAM_DATA, key=lambda k: (k["league"]["name"], k["code"]))
# Get league names
leaguenames = sorted(list(set([team["league"]["name"] for team in cleanlist])))
for league in leaguenames:
teams = [team for team in cleanlist if team["league"]["name"] == league]
click.secho(league, fg="green", bold=True)
for team in teams:
if team["code"] != "null":
click.secho(u"{0}: {1}".format(team["code"], team["name"]), fg="yellow")
click.secho("")
@click.command()
@click.option('--apikey', default=load_config_key, help="API key to use")
@click.option('--list', 'listcodes', is_flag=True, help="List all valid team code/team name pairs")
@click.option('--live', is_flag=True, help="Shows live scores from various leagues")
@click.option('--use12hour', is_flag=True, default=False, help="Displays the time using 12 hour format instead of 24 (default).")
@click.option('--standings', is_flag=True, help="Standings for a particular league")
@click.option('--league', '-league', type=click.Choice(LEAGUE_IDS.keys()),
help=("Choose the league whose fixtures you want to see. "
"See league codes listed in README."))
@click.option('--players', is_flag=True, help="Shows players for a particular team")
@click.option('--team', type=click.Choice(TEAM_NAMES.keys()),
help=("Choose the team whose fixtures you want to see. "
"See team codes listed in README."))
@click.option('--lookup', is_flag=True, help="Get team name from team code when used with --team command.")
@click.option('--time', default=6,
help="The number of days in the past for which you want to see the scores")
@click.option('--upcoming', is_flag=True, default=False, help="Displays upcoming games when used with --time command.")
@click.option('--stdout', 'output_format', flag_value='stdout',
default=True, help="Print to stdout")
@click.option('--csv', 'output_format', flag_value='csv',
help='Output in CSV format')
@click.option('--json', 'output_format', flag_value='json',
help='Output in JSON format')
@click.option('-o', '--output-file', default=None,
help="Save output to a file (only if csv or json option is provided)")
def main(league, time, standings, team, live, use12hour, players, output_format, output_file, upcoming, lookup, listcodes, apikey):
"""A CLI for live and past football scores from various football leagues"""
global headers
headers = {
'X-Auth-Token': apikey
}
try:
if output_format == 'stdout' and output_file:
raise IncorrectParametersException('Printing output to stdout and '
'saving to a file are mutually exclusive')
writer = get_writer(output_format, output_file)
if listcodes:
list_team_codes()
return
if live:
get_live_scores(writer, use12hour)
return
if standings:
if not league:
raise IncorrectParametersException('Please specify a league. '
'Example --standings --league=EPL')
get_standings(league, writer)
return
if team:
if lookup:
map_team_id(team)
return
if players:
get_team_players(team, writer)
return
else:
get_team_scores(team, time, writer, upcoming, use12hour)
return
get_league_scores(league, time, writer, upcoming, use12hour)
except IncorrectParametersException as e:
        click.secho(e.args[0], fg="red", bold=True)
if __name__ == '__main__':
main()
|
thurask/soccer-cli
|
soccer/main.py
|
Python
|
mit
| 11,507
|
[
"VisIt"
] |
aacce4295ff475e9674341bea511d19ff7d98511e9b9ba4f0a3af75bca4abbbb
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls.static import static
from django.views import defaults as default_views
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtailsearch import urls as wagtailsearch_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^api/', include(wagtailapi_urls)),
]
urlpatterns += i18n_patterns(
url(r'^search/', include(wagtailsearch_urls)),
#url(r'', include('myapp.urls')), # Optional URL for including your own vanilla Django urls/views
url(r'', include(wagtail_urls)),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
|
jeremy-c/unusualbusiness
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,693
|
[
"VisIt"
] |
3019c7f99d9f3cd8507b008c2580690563bb1106ddfc107d6e6f0e990fd4a813
|
from __future__ import absolute_import
import logging
import click
import cachetools
try:
import cPickle as pickle
except ImportError:
import pickle
from copy import deepcopy
from pathlib import Path
from pandas import to_datetime
from datetime import datetime
import datacube
from datacube.api.core import Datacube
from datacube.model import DatasetType, GeoPolygon, Range
from datacube.model.utils import make_dataset, xr_apply, datasets_to_doc
from datacube.storage.storage import write_dataset_to_netcdf
from datacube.ui import click as ui
from datacube.utils import read_documents, intersect_points, union_points
from datacube.ui.click import cli
_LOG = logging.getLogger('agdc-ingest')
FUSER_KEY = 'fuse_data'
def find_diff(input_type, output_type, index, ingestion_bounds=None, **query):
from datacube.api.grid_workflow import GridWorkflow
workflow = GridWorkflow(index, output_type.grid_spec)
tiles_in = workflow.list_tiles(product=input_type.name, **query)
tiles_out = workflow.list_tiles(product=output_type.name, **query)
def update_dict(d, **kwargs):
result = d.copy()
result.update(kwargs)
if ingestion_bounds is not None:
polygon = d['geobox'].geographic_extent.points
            # Need to programmatically figure out the upper left and lower right.
#polygon[0] = UL, polygon[2] = LR
# http://www.geeksforgeeks.org/find-two-rectangles-overlap/
# if top left x1 > bottom right x2 or top left x2 > bottom right x1
# if top left y1 < bottom right y2 or top left y2 < bottom right y1
if polygon[0][0] > ingestion_bounds['right'] or ingestion_bounds['left'] > polygon[2][0]:
return None
if polygon[0][1] < ingestion_bounds['bottom'] or ingestion_bounds['top'] < polygon[2][1]:
return None
        return result
tasks = [update_dict(tile, index=key) for key, tile in tiles_in.items() if key not in tiles_out]
tasks = list(filter(None, tasks))
return tasks
def morph_dataset_type(source_type, config):
output_type = DatasetType(source_type.metadata_type, deepcopy(source_type.definition))
output_type.definition['name'] = config['output_type']
output_type.definition['managed'] = True
output_type.definition['description'] = config['description']
output_type.definition['storage'] = config['storage']
output_type.metadata_doc['format'] = {'name': 'NetCDF'}
def merge_measurement(measurement, spec):
measurement.update({k: spec.get(k, measurement[k]) for k in ('name', 'nodata', 'dtype')})
return measurement
output_type.definition['measurements'] = [merge_measurement(output_type.measurements[spec['src_varname']], spec)
for spec in config['measurements']]
return output_type
def get_variable_params(config):
chunking = config['storage']['chunking']
chunking = [chunking[dim] for dim in config['storage']['dimension_order']]
variable_params = {}
for mapping in config['measurements']:
varname = mapping['name']
variable_params[varname] = {k: v for k, v in mapping.items() if k in {'zlib',
'complevel',
'shuffle',
'fletcher32',
'contiguous',
'attrs'}}
variable_params[varname]['chunksizes'] = chunking
return variable_params
def get_app_metadata(config, config_file):
doc = {
'lineage': {
'algorithm': {
'name': 'datacube-ingest',
'version': config.get('version', 'unknown'),
'repo_url': 'https://github.com/GeoscienceAustralia/datacube-ingester.git',
'parameters': {'configuration_file': config_file}
},
}
}
return doc
def get_filename(config, tile_index, sources):
file_path_template = str(Path(config['location'], config['file_path_template']))
time_format = '%Y%m%d%H%M%S%f'
return Path(file_path_template.format(
tile_index=tile_index,
start_time=to_datetime(sources.time.values[0]).strftime(time_format),
end_time=to_datetime(sources.time.values[-1]).strftime(time_format)))
def get_measurements(source_type, config):
def merge_measurement(measurement, spec):
measurement.update({k: spec.get(k) or measurement[k] for k in ('nodata', 'dtype', 'resampling_method')})
return measurement
return [merge_measurement(source_type.measurements[spec['src_varname']].copy(), spec)
for spec in config['measurements']]
def get_namemap(config):
return {spec['src_varname']: spec['name'] for spec in config['measurements']}
def make_output_type(index, config):
source_type = index.products.get_by_name(config['source_type'])
if not source_type:
click.echo("Source DatasetType %s does not exist", config['source_type'])
click.get_current_context().exit(1)
output_type = morph_dataset_type(source_type, config)
_LOG.info('Created DatasetType %s', output_type.name)
output_type = index.products.add(output_type)
return source_type, output_type
def save_tasks_to_file(config, tasks, taskfile):
with open(taskfile, 'wb') as stream:
pickler = pickle.Pickler(stream, pickle.HIGHEST_PROTOCOL)
pickler.dump(config)
for task in tasks:
pickler.dump(task)
_LOG.info('Saved config and tasks to %s', taskfile)
def stream_unpickler(taskfile):
with open(taskfile, 'rb') as stream:
unpickler = pickle.Unpickler(stream)
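        # Tasks were written as a stream of consecutive pickles; read them back until EOF.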
while True:
try:
yield unpickler.load()
except EOFError:
break
def load_tasks_from_file(taskfile):
stream = stream_unpickler(taskfile)
config = next(stream)
return config, stream
@cachetools.cached(cache={}, key=lambda index, id_: id_)
def get_full_lineage(index, id_):
return index.datasets.get(id_, include_sources=True)
def load_config_from_file(index, config):
config_name = Path(config).name
_, config = next(read_documents(Path(config)))
config['filename'] = config_name
return config
def create_task_list(index, output_type, year, source_type, config):
query = {}
if year:
query['time'] = Range(datetime(year=year, month=1, day=1), datetime(year=year+1, month=1, day=1))
bounds = None
if config['ingestion_bounds']:
bounds = config['ingestion_bounds']
tasks = find_diff(source_type, output_type, index, ingestion_bounds=bounds, **query)
_LOG.info('%s tasks discovered', len(tasks))
def update_sources(sources):
return tuple(get_full_lineage(index, dataset.id) for dataset in sources)
def update_task(task):
for i in range(task['sources'].size):
task['sources'].values[i] = update_sources(task['sources'].values[i])
return task
tasks = (update_task(task) for task in tasks)
return tasks
def ingest_work(config, source_type, output_type, index, sources, geobox):
namemap = get_namemap(config)
measurements = get_measurements(source_type, config)
variable_params = get_variable_params(config)
global_attributes = config['global_attributes']
with datacube.set_options(reproject_threads=1):
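        # Only the built-in 'copy' fuser (a no-op fuse_func of None) is supported; other values raise KeyError.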
fuse_func = {'copy': None}[config.get(FUSER_KEY, 'copy')]
data = Datacube.product_data(sources, geobox, measurements, fuse_func=fuse_func)
nudata = data.rename(namemap)
file_path = get_filename(config, index, sources)
def _make_dataset(labels, sources):
sources_union = union_points(*[source.extent.to_crs(geobox.crs).points for source in sources])
valid_data = intersect_points(geobox.extent.points, sources_union)
dataset = make_dataset(dataset_type=output_type,
sources=sources,
extent=geobox.extent,
center_time=labels['time'],
uri=file_path.absolute().as_uri(),
app_info=get_app_metadata(config, config['filename']),
valid_data=GeoPolygon(valid_data, geobox.crs))
return dataset
datasets = xr_apply(sources, _make_dataset, dtype='O') # Store in Dataarray to associate Time -> Dataset
nudata['dataset'] = datasets_to_doc(datasets)
write_dataset_to_netcdf(nudata, global_attributes, variable_params, file_path)
return datasets
def process_tasks(index, config, source_type, output_type, tasks, executor):
def check_valid(task):
if FUSER_KEY in config:
return True
require_fusing = [source for source in task['sources'].values if len(source) > 1]
if require_fusing:
_LOG.warning('Skipping %s - no "%s" specified in config: %s', task['index'], FUSER_KEY, require_fusing)
return not require_fusing
results = []
successful = failed = 0
for task in tasks:
if check_valid(task):
results.append(executor.submit(ingest_work,
config=config,
source_type=source_type,
output_type=output_type,
**task))
else:
failed += 1
for result in executor.as_completed(results):
try:
datasets = executor.result(result)
for dataset in datasets.values:
index.datasets.add(dataset, skip_sources=True)
successful += 1
except Exception: # pylint: disable=broad-except
_LOG.exception('Task failed')
failed += 1
continue
return successful, failed
@cli.command('ingest', help="Ingest datasets")
@click.option('--config-file', '-c',
type=click.Path(exists=True, readable=True, writable=False, dir_okay=False),
help='Ingest configuration file')
@click.option('--year', type=click.IntRange(1960, 2060))
@click.option('--save-tasks', help='Save tasks to the specified file',
type=click.Path(exists=False))
@click.option('--load-tasks', help='Load tasks from the specified file',
type=click.Path(exists=True, readable=True, writable=False, dir_okay=False))
@click.option('--dry-run', '-d', is_flag=True, default=False, help='Check if everything is ok')
@ui.executor_cli_options
@ui.pass_index(app_name='agdc-ingest')
def ingest_cmd(index, config_file, year, save_tasks, load_tasks, dry_run, executor):
if config_file:
config = load_config_from_file(index, config_file)
source_type, output_type = make_output_type(index, config)
tasks = create_task_list(index, output_type, year, source_type, config)
elif load_tasks:
config, tasks = load_tasks_from_file(load_tasks)
source_type, output_type = make_output_type(index, config)
else:
click.echo('Must specify exactly one of --config-file, --load-tasks')
click.get_current_context().exit(1)
if dry_run:
for task in tasks:
click.echo('Would create %s' % get_filename(config, task['index'], task['sources']))
return
if save_tasks:
save_tasks_to_file(config, tasks, save_tasks)
return
successful, failed = process_tasks(index, config, source_type, output_type, tasks, executor)
click.echo('%d successful, %d failed' % (successful, failed))
|
ceos-seo/Data_Cube_v2
|
agdc-v2/datacube/scripts/ingest.py
|
Python
|
apache-2.0
| 11,891
|
[
"NetCDF"
] |
5e870ddae3e1e7095c3e4f17d7caeb20684a56f588ad146c3379ee80e413e5e1
|
"""
Use the greedy-terrain-decimator to display a decimated terrain view.
This example illustrates decimating a terrain. We use the
greedy-terrain-decimator to create a reduced mesh with an optimized grid that
approximates the initial regular grid.
The initial grid is displayed in white, and the optimized grid is displayed in
black, with the surface it creates. The initial grid can be seen
disappearing as it goes under the surface of the approximated grid:
although the decimated mesh closely follows the original, it is not
exactly the same.
One can see that the reduction in number of polygons is huge: the white
grid is much finer than the black grid. It is interesting to note that
the decimated mesh closely follows the original, retaining nearly full
polygon density in spots where the terrain changes most quickly.
This example uses the Grand Canyon topological radar data, from NASA.
The greedy-terrain-decimator is only useful to decimate a surface
warped from 2D data. To decimate more general meshes, you can use the
less-efficient decimate-pro filter (see :ref:`example_julia_set_decimation`).
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Retrieve the grand Canyon topological data ###################################
# Original file:
#'ftp://e0srp01u.ecs.nasa.gov/srtm/version2/SRTM1/Region_04/N36W113.hgt.zip'
import os
if not os.path.exists('N36W113.hgt.zip'):
# Download the data
import urllib
print 'Downloading data, please wait (10M)'
opener = urllib.urlopen(
'https://s3.amazonaws.com/storage.enthought.com/www/sample_data/N36W113.hgt.zip'
)
open('N36W113.hgt.zip', 'wb').write(opener.read())
# Load the data (signed 2 byte integers, big endian) ###########################
import zipfile
import numpy as np
data = np.fromstring(zipfile.ZipFile('N36W113.hgt.zip').read('N36W113.hgt'),
'>i2')
data.shape = (3601, 3601)
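# Crop a 200x200 sample out of the 3601x3601 SRTM tile to keep the demo fast.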
data = data[200:400, 1200:1400]
data = data.astype(np.float32)
# Plot an interesting section ##################################################
from mayavi import mlab
mlab.figure(1, size=(450, 390))
mlab.clf()
data = mlab.pipeline.array2d_source(data)
# Use a greedy_terrain_decimation to create a decimated mesh
terrain = mlab.pipeline.greedy_terrain_decimation(data)
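# Ask the decimator to target roughly 5000 triangles in the output mesh.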
terrain.filter.error_measure = 'number_of_triangles'
terrain.filter.number_of_triangles = 5000
terrain.filter.compute_normals = True
# Plot the lines of the mesh in black
lines = mlab.pipeline.surface(terrain, color=(0, 0, 0),
representation='wireframe')
# The terrain decimator has done the warping. We control the warping
# scale via the actor's scale.
lines.actor.actor.scale = [1, 1, 0.2]
# Display the surface itself.
surf = mlab.pipeline.surface(terrain, colormap='gist_earth',
vmin=1450, vmax=1650)
surf.actor.actor.scale = [1, 1, 0.2]
# Display the original regular grid. This time we have to use a
# warp_scalar filter.
warp = mlab.pipeline.warp_scalar(data, warp_scale=0.2)
grid = mlab.pipeline.surface(warp, color=(1, 1, 1),
representation='wireframe')
mlab.view(-17, 46, 143, [1.46, 8.46, 269.4])
mlab.show()
|
liulion/mayavi
|
examples/mayavi/mlab/canyon_decimation.py
|
Python
|
bsd-3-clause
| 3,293
|
[
"Mayavi"
] |
8b60bea759a74796a30b8499ae49a064c4c6b8e957cf3461fd05d01dcb98516d
|