| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
import os
import shutil
import sys
import string
import re
import time
import file
import ut
import menu
#
# Inputs to the conversion:
#
#   dbl/flt          : selected via argv
#   little/big       : passed to the vtk function
#   single/multi     :
#   num. of scalars  :
#   num. of vectors  :
#   rec_beg          :
#   rec_end          :
#
# Inputs to VTK:
#
#   size,
#   number of scalars,
#   input file names and offsets
#   geometry
#   endianity
#   float/double
#
entries = []
options = []
default = []
scrh = []
outname = "flt.out"
line = file.read_lines (outname,0,0)
rec_beg = 0
rec_end = file.count_lines (outname)
scrh = []
entries.append('REC_BEG')
for n in range(rec_end): scrh.append(repr(n))
options.append(scrh)
default.append('0')
scrh = []
entries.append('REC_END')
for n in range(rec_end): scrh.append(repr(n))
options.append(scrh)
default.append(str(rec_end-1))
scrh = []
entries.append('REC_SKIP')
for n in range(rec_end): scrh.append(repr(n+1))
options.append(scrh)
default.append('1')
scrh = []
scrh = line[0].split()
for x in scrh[6:]:
vvect = 0
bvect = 0
y = []
if (x == "v1"):
i = scrh.index(x)
vvect = 1
if (scrh[i+1] == "v2"):
i = i + 1
vvect = 2
if (scrh[i+1] == "v3"):
vvect = 3
if (x == "b1"):
i = scrh.index(x)
bvect = 1
if (scrh[i+1] == "b2"):
i = i + 1
bvect = 2
if (scrh[i+1] == "b3"):
bvect = 3
if (x == "v2" or x == "b2"): continue
if (x == "v3" or x == "b3"): continue
if (vvect == 0 and bvect == 0):
entries.append(x)
options.append(['YES','NO'])
default.append('YES')
elif (vvect >= 2): # 2 or 3 velocity components; '== 2' silently dropped 3-component data
entries.append('Velocity')
options.append(['YES','NO'])
default.append('YES')
elif (bvect >= 2): # 2 or 3 field components; '== 2' silently dropped 3-component data
entries.append('Magnetic field')
options.append(['YES','NO'])
default.append('YES')
print entries
selection = ''
selection = menu.select(entries,options,default,'Input')
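#
# Hedged illustration (an addition, not part of the original PLUTO tool):
# the loop above collapses consecutive "v1 v2"("v3") / "b1 b2"("b3") header
# columns into a single Velocity / Magnetic field menu entry. A standalone
# sketch of that grouping, assuming a flt.out header of the usual form:
def _group_vector_columns(cols):
    """e.g. ['rho', 'pr', 'v1', 'v2', 'v3'] -> ['rho', 'pr', 'Velocity']"""
    names = []
    i = 0
    while i < len(cols):
        if cols[i] == "v1" and cols[i+1:i+2] == ["v2"]:
            names.append("Velocity")        # 2 or 3 components -> one entry
            i += 3 if cols[i+2:i+3] == ["v3"] else 2
        elif cols[i] == "b1" and cols[i+1:i+2] == ["b2"]:
            names.append("Magnetic field")
            i += 3 if cols[i+2:i+3] == ["b3"] else 2
        else:
            names.append(cols[i])           # plain scalar column
            i += 1
    return names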
| kylekanos/mypluto | Tools/Python/fileconvert.py | Python | gpl-2.0 | 1,914 | ["VTK"] | 0f9ead933ef4717e64d71ccbc3c9a68f834bbbc7a5596f2f3d06129e0ddcd1a6 |
from dimerizer.alparser import atomlist_backparser as backparser
def write(sigmas,natoms,atomlist,outdir, allatoms=False, q = 0.5, temp = 300, atoffset=0):
"""
A collection plumed template for the two basic ways of running a Dimer simulation.
Check the Plumed documentation for more info about the Dimer Collective Variable.
"""
if allatoms:
satoms="ALLATOMS"
grps=""
else:
lst=range(1+atoffset,natoms+atoffset+1)
atlist2= range(natoms, natoms+len(atomlist))
d1b1=backparser(atomlist)
d1b2=backparser(atlist2)
wholemolrange="1-"+str(natoms+2*len(atomlist))
grps="""
GROUP ATOMS=%s LABEL=d1b1
GROUP ATOMS=%s LABEL=d1b2
WHOLEMOLECULES ENTITY0=%s
""" % (d1b1,d1b2,wholemolrange)
satoms="ATOMS1=d1b1 ATOMS2=d1b2"
for i,s in enumerate(sigmas):
fn=outdir+"plumed."+str(i)+".dat"
fh = open(fn,"w+")
sg="DSIGMA="+str(s)
plin="""
####### File generated by DIMERIZER #########
#######This is the replica index %d #########
##Basic configuration for a Gromacs Dimer
##simulation with virtual sites.
##To be used with modified forcefields
##that should have been generated along with this file.
##Edit with what you need.
#############################################
%s
dim: DIMER Q=%.2f TEMP=%.2f %s %s
RESTRAINT ARG=dim AT=0 KAPPA=0 SLOPE=1 LABEL=dimforces
PRINT ...
ARG=dim
STRIDE=10
FILE=colvar.string
... PRINT
""" % (i,grps,q,temp,sg,satoms)
fh.write(plin)
fh.close()
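# Hedged usage sketch (an addition; the argument values are illustrative and
# the zero-based atomlist convention is an assumption, so check it against
# dimerizer.alparser before relying on it):
if __name__ == "__main__":
    # one plumed.<i>.dat per DSIGMA replica, for a 50-atom system whose
    # dimer beads duplicate the first ten atoms
    write(sigmas=[0.05, 0.1, 0.2], natoms=50, atomlist=range(10),
          outdir="./", q=0.5, temp=300)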
| marckn/dimerizer | dimerizer/plumed/templates.py | Python | gpl-3.0 | 1,545 | ["Gromacs"] | 04342e08d46356aa132ffeaa432073650da9723e148c2aa8eb2f1d90a9f6c363 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Creating an appointment system with email blast."""
def prepare_email(appointments):
"""Generates the text of an email to clients.
Args:
appointments: A list of two-item tuples with the client's name and their
appointment time as members
Returns:
A new list with the client's email body
Examples:
>>> prepare_email([('Jen', '2015'), ('Max', 'March 3')])
['Dear Jen,\nI look forward to meeting with you on 2015.\nBest,\nMe',
'Dear Max,\nI look forward to meeting with you on March 3.\nBest,\nMe']
>>> prepare_email([('Mary Moore', '12.1.2015'), ('Jacob Jones', 'May 09,
2016')])
['Dear Mary Moore,\nI look forward to meeting with you on 12.1.2015.\n
Best,\nMe', 'Dear Jacob Jones,\nI look forward to meeting with you on
May 09, 2016.\nBest,\nMe']
"""
emailbody = []
for client in appointments:
emailstr = 'Dear {},\nI look forward to meeting with you on {}.' \
'\nBest,\nMe'
emailbody.append(emailstr.format(client[0], client[1]))
return emailbody
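# Hedged usage sketch (an addition, exercising the function above):
if __name__ == '__main__':
    for body in prepare_email([('Ana', 'July 1')]):
        print body  # 'Dear Ana,' / meeting line / 'Best,' / 'Me'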
| slb6968/is210-week-06-synthesizing | task_02.py | Python | mpl-2.0 | 1,149 | ["BLAST"] | 99a16027ea47ff4e0cd544b38d4d0515e3015308df90a10549d60306ef9b5926 |
|
import math
import random
import numpy
from base import EffectLayer, HeadsetResponsiveEffectLayer
class FireflySwarmLayer(HeadsetResponsiveEffectLayer):
"""
Each tree is a firefly. When one blinks, it pulls its neighbors closer or
further from blinking themselves, bringing the group into and out of sync.
For a full explanation of how this works, see:
Synchronization of Pulse-Coupled Biological Oscillators
Renato E. Mirollo; Steven H. Strogatz
SIAM Journal on Applied Mathematics, Vol. 50, No. 6. (Dec., 1990), pp. 1645-1662
This has a known bug - it can miss blinks if update isn't called frequently
enough - but it is only apparent at unacceptably low framerates, so it is left unfixed for now.
"""
class Firefly:
"""
A single firefly. Its activation level increases monotonically in range [0,1] as
a function of time. When its activation reaches 1, it initiates a blink and drops
back to 0.
"""
CYCLE_TIME = 3 # seconds
NUDGE = 0.2 # how much to nudge it toward firing after its neighbor fires
EXP = 2.0 # exponent for phase->activation function, chosen somewhat arbitrarily
def __init__(self, tree, color=(1,1,1)):
self.offset = random.random() * self.CYCLE_TIME
self.tree = tree
self.color = color
self.blinktime = 0
def nudge(self, params, response_level):
# Bump this firefly forward or backward in its cycle, closer to or further from
# its next blink, depending on response level
p = self.phi(params)
a = self.activation(p)
response = response_level - 0.5
nudge_size = response*self.NUDGE
# if we always "desync" at same rate, it won't actually desync
if response < 0:
nudge_size *= (random.random()+0.5)
a2 = max(min(a + nudge_size, 1), 0)
# find the phase parameter corresponding to that activation level
p2 = self.activation_to_phi(a2)
# adjust time offset to bring us to that phase
self.offset += (p2 - p) * self.CYCLE_TIME
# TMI
debug=False
if self.tree == 1 and debug:
print self.offset, p, p2, self.phi(params), self.activation(self.phi(params))
# now that we've changed its state, we need to re-update it
self.update(params)
def phi(self, params):
"""
Converts current time + time offset into phi (oscillatory phase parameter in range [0,1])
"""
return ((params.time + self.offset) % self.CYCLE_TIME)/self.CYCLE_TIME + 0.01
def activation(self, phi):
"""
Converts phi into activation level. Activation function must be concave in order for
this algorithm to work.
"""
return pow(phi, 1/self.EXP)
def activation_to_phi(self, f):
""" Convert from an activation level back to a phi value. """
return pow(f, self.EXP)
def update(self, params):
"""
Note the time when activation crosses threshold, so we can use it as the onset time for rendering the
actual blink. Return whether firefly has just crossed the threshold or not so we know whether to nudge its
neighbors.
"""
p = self.phi(params)
blink = self.activation(p) >= 1
if blink:
self.blinktime = params.time
return blink
def render(self, model, params, frame):
"""
Draw pulses with sinusoidal ramp-up/ramp-down
"""
dt = params.time - self.blinktime
dur = float(self.CYCLE_TIME)/2
if dt < dur:
scale = math.sin(math.pi * dt/dur)
if self.color is None:
frame[model.edgeTree==self.tree] *= scale
else:
frame[model.edgeTree==self.tree] += self.color * scale
else:
if self.color is None:
frame[model.edgeTree==self.tree] = 0
def __init__(self, respond_to='meditation', color=None):
super(FireflySwarmLayer, self).__init__(respond_to)
self.cyclers = []
self.cachedModel = None
if color:
self.color = numpy.array(color, dtype='f')
else:
self.color = None
def render_responsive(self, model, params, frame, response_level):
if model != self.cachedModel:
self.trees = len(model.roots)
self.cyclers = [ FireflySwarmLayer.Firefly(e, color=self.color) for e in range(self.trees) ]
self.cachedModel = model
blink = self.cyclers[0].update(params)
self.cyclers[0].render(model, params, frame)
for c in self.cyclers[1:]:
if blink and response_level:
c.nudge(params, response_level)
else:
c.update(params)
c.render(model, params, frame)
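# Hedged numeric note (an addition): with EXP = 2, activation(phi) = phi**0.5
# is concave and activation_to_phi inverts it exactly, so an equal nudge in
# activation space yields a larger phase jump late in the cycle, e.g.
# p = 0.25 -> a = 0.5, and a + 0.2 = 0.7 maps back to p2 = 0.49 (a 0.24 phase
# advance). That concavity is the property the Mirollo-Strogatz
# synchronization argument relies on.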
| chillpop/RELAX-HARDER | effects/firefly_swarm.py | Python | mit | 5,371 | ["Firefly"] | 52dee4a593a6a7a29a08b46693e5b3abb2d87a6df88d71a768d20d508a182efd |
|
# coding: utf8
{
'': '',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'About': 'About',
'Account': 'Cuenta',
'Accounting': 'Contabilidad',
'Accounting period': 'Accounting period',
'Accounts plan': 'Accounts plan',
'Actions': 'Actions',
'Activate period': 'Activate period',
'Active user: ': 'Usuario activo: ',
'Add an item': 'Add an item',
'Add article': 'Ingresar artículo',
'Add check': 'Ingresar cheque',
'Add item': 'Ingresar ítem',
'Add payment method': 'Ingresar método de pago',
'Add tax': 'Ingresar impuesto',
'Address': 'Address',
'Administrative panel': 'Panel administrativo',
'Advanced': 'Avanzado',
'Agreement': 'Agreement',
'Allocate': 'Asignar',
'Allocated': 'Asignada/o',
'Amount': 'Importe',
'Appadmin': 'Appadmin',
'Apply payment': 'Apply payment',
'Archivo': 'Archivo',
'Articles': 'Artículos',
'Articles list': 'Lista de artículos',
'Assign travel': 'Assign travel <translate>',
'Auth cas': 'Auth cas',
'Auth event': 'Auth event',
'Auth group': 'Auth group',
'Auth membership': 'Auth membership',
'Auth permission': 'Auth permission',
'Auth user': 'Auth user',
'Auto apply': 'Auto-aplicar',
'Ayuda': 'Ayuda',
'Back to list': 'Volver a la lista',
'Backup': 'Copia de seguridad',
'Bank': 'Bank',
'Bank check': 'Bank check',
'Banks': 'Banks',
'Batch': 'Batch',
'Billing': 'Facturación',
'Blank for price list values': 'En blanco para valores de la lista de precios',
'Branch': 'Sucursal',
'Branches': 'Sucursales',
'Browse': 'Explorar',
'By article': 'Por artículo',
'CA': 'CC',
'CRUD': 'ABM',
'Calculate movements difference....': 'Calcular diferencia de movimientos....',
'Cancel': 'Cancel',
'Cannot be empty': 'No puede ser vacío',
'Cash': 'Caja',
'Cash balance': 'Cash balance',
'Category': 'Category',
'Change': 'Cambiar',
'Change layout colors': 'Change layout colors',
'Change location': 'Cambiar ubicación',
'Change password': 'Cambiar la contraseña',
'Change stock': 'Cambiar existencias',
'Change update taxes value to %s': 'Cambiar/actualizar valor de impuesto a %s',
'Change user': 'Cambiar el usuario',
'Checkbook': 'Checkbook',
'Checks': 'Checks',
'Choose a concept': 'Seleccionar concepto',
'Choose a document type': 'Choose a document type',
'Choose a price list': 'Elija una lista de precios',
'City': 'City',
'Client IP': 'Cliente IP',
'Closing': 'Cierre',
'Code': 'Código',
'Collection': 'Collection',
'Color': 'Color',
'Common': 'Common',
'Compras': 'Compras',
'Concept': 'Concepto',
'Contabilidad': 'Contabilidad',
'Contact': 'Contact',
'Contact Group': 'Grupo de contactos',
'Contact user': 'Contact user',
'Cost center': 'Cost center',
'Could not load the firm contact information': 'No se pudo cargar la información de contacto de empresa',
'Country': 'Country',
'Create': 'Crear',
'Create down payment': 'Create down payment <translate>',
'Create fee': 'Crear arancel',
'Create invoice': 'Crear factura',
'Create invoice batch': 'Create invoice batch',
'Create order': 'Crear pedido',
'Create payment': 'Create payment',
'Create/Edit orders': 'Crear/editar pedidos',
'Credit card': 'Tarjeta de crédito',
'Credit card coupon': 'Credit card coupon',
'Crm': 'Crm',
'Csv to db': 'Csv to db',
'Current account': 'Cuenta corriente',
'Current account calculated amount': 'Valor calculado de la cuenta corriente',
'Current account list/payments': 'Cuenta corriente: lista/pagos',
'Current account payment data': 'Información de pagos de cuenta corriente',
'Current account quotas': 'Cuotas de cuenta corriente',
'Current account report': 'Informe de cuenta corriente',
'Current accounts': 'Current accounts',
'Current accounts data': 'Current accounts data',
'Current accounts detail': 'Current accounts detail',
'Current accounts payment': 'Current accounts payment',
'Current accounts payments': 'Pagos de cuentas corrientes',
'Current accounts type': 'Current accounts type',
'Current language': 'Lenguaje actual',
'Custom serial code': 'Custom serial code',
'Customer': 'Deudor',
'Customer Panel': 'Panel de Clientes',
'Customer control panel': 'Panel de control de clientes',
'Customer current account': 'Cuenta corriente de Deudor',
'Customer current account status': 'Customer current account status',
'Customer deletion date': 'Fecha de eliminación del deudor',
'Customer firm name': 'Razón social del deudor',
'Customer group': 'Customer group',
'Customer panel': 'Customer panel',
'Customer starting date': 'Fecha de inicio del deudor',
'Database': 'Base de datos',
'Date': 'Date',
'Db to csv': 'Db to csv',
'Deactivate access levels': 'Desactivar niveles de acceso',
'Debugging': 'Debugging',
'Default': 'Default',
'Default salesperson': 'Vendedor por defecto',
'Department': 'Department',
'Description': 'Descripción',
'Design': 'Diseño',
'Desktop App': 'Aplicación de escritorio',
'Difference: %(difference)s': 'Difference: %(difference)s',
'Difference: %s': 'Diferencia: %s',
'Discount by customer': 'Descuento por deudor',
'Discount/Surcharges': 'Descuentos/Recargos',
'Document': 'Comprobante',
'Edit': 'Editar',
'Edit movements': 'Edit movements',
'Edit order number': 'Edit order number',
'Efectivo': 'Efectivo',
'Entries': 'Entries',
'Entries: %(amounts)s': 'Entries: %(amounts)s',
'Entries: %s': 'Ingresos: %s',
'Entry': 'Entry',
'Erasing movement %(id)s': 'Erasing movement %(id)s',
'Error trying to get the operation customer/supplier data from database': 'Error trying to get the operation customer/supplier data from database',
'Existencias': 'Existencias',
'Exits: %(amounts)s': 'Exits: %(amounts)s',
'Exits: %s': 'Salidas: %s',
'Facilitate collection': 'Facilitate collection <translate>',
'False if deferred payment (df), True if paid with cash, ch (check) or current account': 'Falso si es pago diferido (df), Verdadero si el pago es en efvo., ch (cheque) o cuenta corriente',
'Family': 'Family',
'Fax': 'Fax',
'Fee': 'Fee',
'Fees': 'Fees',
'File': 'Archivo',
'File CRUD': 'ABM Archivos',
'File name': 'File name',
'Financials': 'Financials',
'Finantial situation': 'Situación financiera',
'Firm': 'Razón social',
'Firm specification successful': 'Firm specification successful',
'Fiscal controller': 'Fiscal controller',
'For PostgreSQL databases. Use this option with care. A superuser database conection is required': 'For PostgreSQL databases. Use this option with care. A superuser database conection is required',
'For purchases: %s payment is recorded as concept id %s': 'Para compras: %s pago es registrado como concepto id %s',
'Form accepted': 'Form accepted',
'Forms': 'Formularios',
'Formula': 'Formula',
'Formulas': 'Formulas',
'Fund': 'Fund',
'Funds': 'Funds',
'Generate': 'Generar',
'GestionLibre': 'GestiónLibre',
'Group ID': 'ID de grupo',
'HTML panel': 'HTML panel',
'Header form': 'Header form',
'Healthcare': 'Healthcare',
'Help': 'Ayuda',
'Hr': 'Hr',
'ID': 'ID',
'Import': 'Importar',
'Import csv dir': 'Import csv dir',
'Import example db from CSV': 'Import example db from CSV',
'Index': 'Inicio',
'Insert movements element': 'Ingresar elemento de movimientos',
'Insert order element': 'Insert order element',
'Installment': 'Installment',
'Installments': 'Planes de pago',
'Invoice header type': 'Tipo de encabezado de factura',
'Journal Entries': 'Libros diarios',
'Journal Entry': 'Libro diario',
'Journal entries': 'Libros diarios',
'Journal entry': 'Journal entry',
'Journal entry total amount': 'Suma total del libro diario',
'Jurisdiction': 'Jurisdiction',
'Label': 'Etiqueta',
'Labels': 'Labels',
'Labor union': 'Labor union',
'Languages': 'Lenguajes',
'Layout colors': 'Colores de la interfaz',
'List fees': 'List fees',
'List installments': 'List installments',
'List of operation elements': 'Lista de elementos de la operación',
'List of operations': 'Lista de operaciones',
'List of order elements': 'List of order elements',
'List order allocation operations': 'Lista de operaciones de asignaciones de pedidos',
'List order allocations': 'Lista de asignaciones de pedidos',
'Lists': 'Lists',
'Login': 'Iniciar sesión',
'Login accepted': 'Login accepted',
'Logout': 'Terminar sesión',
'Map': 'Mapeo',
'Memo': 'Memo',
'Migration': 'Migration',
'Model': 'Modelo',
'Modify header': 'Modificar encabezado',
'Modify movements element': 'Modify movements element',
'Modify operation item': 'Modify operation item',
'Modify operation number': 'Modificar número de operación',
'Modify sales order element': 'Modify sales order element',
'Movement': 'Movement',
'Movements': 'Movimientos',
'Movements (Operations)': 'Movimientos (operaciones)',
'Movements add check': 'Movements add check',
'Movements add discount surcharge': 'Movements add discount surcharge',
'Movements add item': 'Movements add item',
'Movements add payment method': 'Movements add payment method',
'Movements add tax': 'Movements add tax',
'Movements articles': 'Movements articles',
'Movements current account concept': 'Movements current account concept',
'Movements current account data': 'Movements current account data',
'Movements current account quotas': 'Movements current account quotas',
'Movements detail': 'Detalle de operación',
'Movements element': 'Movements element',
'Movements header': 'Movements header',
'Movements list': 'Lista de movimientos',
'Movements modify check': 'Movements modify check',
'Movements modify element': 'Movements modify element',
'Movements modify header': 'Movements modify header',
'Movements modify item': 'Movements modify item',
'Movements option update stock': 'Movements option update stock',
'Movements option update taxes': 'Movements option update taxes',
'Movements panel': 'Panel de movimientos',
'Movements price list': 'Movements price list',
'Movements process': 'Movements process',
'Movements process. Operation: %s': 'Registrar movimientos. Operación: %s',
'Movements select': 'Movements select',
'Movements select warehouse': 'Movements select warehouse',
'Movements start': 'Movements start',
'Name': 'Nombre',
'New customer order element': 'New customer order element',
'New customer order modify element': 'New customer order modify element',
'New expenses invoice': 'New expenses invoice',
'New function': 'New function',
'New installment': 'Nuevo plan de pago',
'New invoice': 'New invoice',
'New operation': 'Nueva operación',
'New operation (movements form)': 'Nueva operación (formulario de movimientos)',
'New operation item': 'Nuevo ítem de operación',
'New option': 'Nueva opción',
'New packing slip from this allocation': 'Nuevo remito desde esta asignación de pedidos',
'New query': 'Nueva consulta',
'No customer specified': 'No customer specified',
'No document type specified': 'No document type specified',
'None selected': 'No se seleccionó un elemento',
'Number': 'Número',
'Object or table name': 'Nombre de tabla u objeto',
'Observations': 'Observaciones',
'Operation': 'Operación',
'Operation %(operation)s is not editable': 'La operación %(operation)s no se puede editar',
'Operation %s is not editable': 'La operación %s no es editable',
'Operation detail': 'Detalle de la operación',
'Operation discounts and surcharges': 'Descuentos y recargos de la operación',
'Operation header': 'Encabezado de la operación',
'Operation header incomplete. Please select a document type': 'Operation header incomplete. Please select a document type',
'Operation installment': 'Operation installment',
'Operation modified': 'Operación modificada',
'Operation number %(id)s': 'Operation number %(id)s',
'Operation number %s': 'Número de operación %s',
'Operation processing result': 'Resultado del registro de la operación',
'Operation successfully processed': 'La operación se registró correctamente',
'Operation: %s. Amount: %s. Value: %s. Concept: %s, Quantity: %s, Movement: %s': 'Operación: %s. Importe: %s. Valor: %s. Concepto: %s, Cantidad: %s, Movimiento: %s',
'Operations': 'Operaciones',
'Operations list': 'Lista de operaciones',
'Option': 'Opción',
'Options': 'Opciones',
'Order allocation': 'Asignación de pedidos',
'Order allocation list': 'Lista de asignación de pedidos',
'Order list': 'Lista de pedidos',
'Order number': 'Order number',
'Ordered': 'Pedido/a',
'Origin': 'Origen',
'Other': 'Otros',
'Output': 'Output',
'Packing slip': 'Remito',
'Page setup': 'Configurar página',
'Parameters': 'Parámetros',
'Passages': 'Passages',
'Password reset': 'Reiniciar contraseña',
'Payment method': 'Payment method',
'Payment terms': 'Payment terms',
'Payroll': 'Payroll',
'Payroll column': 'Payroll column',
'Payroll new': 'Payroll new',
'Pension': 'Pension',
'Per item printing': 'Impresión por ítem',
'Period': 'Ciclo/Período',
'Plant': 'Plant',
"Please insert your firm's tax id": 'Por favor ingrese la identificación tributaria de su empresa',
'Please login as admin for setup options': 'Please login as admin for setup options',
'Point of sale': 'Point of sale',
'Points to order / invoice / packingslips': 'Apunta a pedidos / facturas / remitos',
'Post register specify firm': 'Post register specify firm',
'Post registration form': 'Post registration form',
'Post-registration form': 'Formulario post-registro',
'Postal address': 'Dirección postal',
'Posted': 'Registrado',
'Predefine documents': 'Predefinir comprobantes',
'Price': 'Price',
'Price check': 'Price check',
'Price list': 'Lista de precios',
'Price lists': 'Price lists',
'Prices': 'Precios',
'Print this document': 'Imprimir este documento',
'Print...': 'Impresión...',
'Process': 'Registrar',
'Process jurisdictions': 'Procesar jurisdicciones',
'Process operation': 'Registrar operación',
'Processes': 'Processes',
'Product': 'Producto',
'Product billing': 'Product billing',
'Product code': 'Código de producto',
'Product structure': 'Product structure',
'Production': 'Production',
'Purchases': 'Compras',
'Qty': 'Qty',
'Quantity': 'Cantidad',
'Queries': 'Consultas',
'Quit': 'Salir',
'Quota': 'Quota',
'RIA Create/Edit operations': 'Modo RIA crear/editar operaciones',
'RIA Product billing': 'Modo RIA facturación de productos',
'RIA Stock': 'Modo RIA existencias',
'Rate': 'Rate',
'Read': 'Read',
'Receive': 'Recibir',
'Reconciliation': 'Reconciliation',
'Record ID': 'ID del registro',
'Record deleted': 'Record deleted',
'Record updated': 'Record updated',
'Redirecting from event': 'Redirecting from event',
'Referenced table': 'Tabla referenciada',
'Register': 'Registrarse',
'Registration': 'Registration',
'Relative': 'Relative',
'Replica': 'Replica',
'Reportes': 'Reportes',
'Reports': 'Reportes',
'Reset': 'Reiniciar',
'Reset form': 'Reset form',
'Reset operation': 'Reiniciar operación',
'Reset packing slip': 'Reset packing slip',
'Reset this order': 'Reset this order',
'Revert payment application': 'Revert payment application',
'Ria movements': 'Ria movements',
'Ria movements process': 'Ria movements process',
'Ria movements reset': 'Ria movements reset',
'Ria new customer order': 'Ria new customer order',
'Ria new customer order reset': 'Ria new customer order reset',
'Ria product billing': 'Ria product billing',
'Ria product billing start': 'Ria product billing start',
'Ria stock': 'Ria stock',
'Role': 'Rol',
'Salary': 'Salary',
'Sales': 'Ventas',
'Sales contact': 'Contacto de ventas',
'Salesperson': 'Salesperson',
'Scm': 'Scm',
'Se requiere un usuario autenticado': 'Se requiere un usuario autenticado',
'Securities': 'Securities',
'Security policies': 'Políticas de seguridad',
'Select': 'Select',
'Select an operation type': 'Seleccione una clase de operación',
'Select price list': 'Seleccione una lista de precios',
'Select warehouse': 'Seleccione un depósito',
'Send': 'Enviar',
'Session closed by user input': 'Sesión finalizada por acción del usuario',
'Set colors as default': 'Establecer como colores por defecto',
'Set default layout colors': 'Set default layout colors',
'Set language': 'Set language',
'Setup': 'Configuración',
'Situation': 'Situation',
'Size': 'Size',
'Specify firm': 'Especificar razón social',
'Staff': 'Staff',
'Staff category': 'Staff category',
'State': 'State',
'Stock': 'Existencias',
'Stock item update': 'Stock item update',
'Stock list': 'Listado de existencias',
'Stock movement': 'Movimiento de existencias',
'Stock query': 'Consulta de existencias',
'Storage folder': 'Storage folder',
'Structures': 'Structures',
'Subcategory': 'Subcategory',
'Subcustomer': 'Cliente',
'Subcustomer current account': 'Cuenta corriente cliente',
'Summary': 'Summary',
'Supplier': 'Proveedor',
'System tables': 'Tablas del sistema',
'TAX ID': 'Identificación impositiva',
'Tax': 'Tax',
'Tax _id': 'Tax _id',
'Tax id': 'Clave impositiva',
'Taxes are': 'Acción para impuestos',
'Telephone numbers': 'Números telefónicos',
'Terms of payment': 'Terms of payment <translate>',
'The form has errors': 'The form has errors',
'The item will be removed without confirmation': 'Se eliminará el ítem sin confirmación',
'The links': 'Enlaces',
'The user entered does not exist': 'The user entered does not exist',
'This action requires authenticated users': 'Se requiere un usuario autenticado',
'Timestamp': 'Fecha y hora',
'Total amount': 'Monto total',
'Total debt': 'Total adeudado',
'Transfers': 'Transferencias',
'Type of current account': 'Tipo de cuenta corriente',
'Update': 'Actualización',
'Update fee': 'Update fee',
'Update installment': 'Update installment',
'Update order allocation': 'Actualizar asignación de pedido',
'Update quota': 'Update quota',
'User': 'User',
'User ID': 'ID de usuario',
'VAT sub-journal': 'Subdiario IVA',
"Valid firm tax id's": 'Identificación tributaria válida',
'Value': 'Valor',
'Various': 'Varios',
'Ventanas': 'Ventanas',
'Ventas': 'Ventas',
'Verify': 'Verificar',
'Warehouse': 'Depósito',
'Welcome %s': 'Welcome %s',
'Wiki': 'Wiki',
'Windows': 'Ventanas',
"You have not specified you firm's TAX ID. Please visit the": "You have not specified you firm's TAX ID. Please visit the",
'abbr': 'abrev',
'account': 'cuenta',
'accounting': 'accounting',
'accounting period': 'Ejercicio contable',
'accumulated': 'acumulada/o',
'addition': 'ingresado/a',
'additions': 'ingresos',
'address': 'direcciones',
'adherent': 'adherente',
'agreement': 'acuerdo',
'aliquot': 'alícuota',
'allowance': 'allowance <translate>',
'amount': 'importe',
'and try again': 'and try again',
'appadmin': 'appadmin',
'args': 'args',
'authorization code': 'código de autorización',
'avoidance': 'avoidance <translate>',
'balance': 'balance',
'balanced': 'balanceado',
'bank': 'banco',
'bank check': 'cheque',
'bank checks': 'cheques',
'banks': 'bancos',
'bd': 'bd',
'birth': 'nacimiento',
'books': 'books <translate>',
'bouncer': 'rechazado',
'branch': 'sucursal',
'budget': 'budget <translate>',
'calculate': 'calcular',
'canceled': 'cancelada/o',
'cancellation': 'cancelación',
'capacity': 'capacidad',
'cash': 'Caja',
'cash box': 'caja',
'category': 'categoría',
'check limit': 'límite de cheques',
'checkbook': 'chequera',
'city': 'ciudad',
'closed': 'cerrada/o',
'code': 'código',
'coefficient': 'coeficiente',
'collected': 'cobrada/o',
'collection': 'colección',
'collections': 'colecciones',
'color': 'color',
'commission': 'comisión',
'compress': 'comprimir',
'concept': 'concepto',
'condition': 'condición',
'confirm printing': 'confirmar impresión',
'contact': 'contacto',
'continuous': 'continuo',
'contribution': 'contribución',
'contribution discount': 'descuento por contribución',
'copies': 'copias',
'cost center': 'centro de costo',
'countable': 'contable',
'country': 'país',
'coupons': 'cupones',
'credit': 'crédito',
'crm': 'crm',
'current account': 'cuenta corriente',
'current account limit': 'límite de cuenta corriente',
'customer': 'deudor',
'customer group': 'grupo deudores',
'datum': 'datum <translate>',
'days': 'días',
'debit': 'débito',
'debt limit': 'límite de deuda',
'default': 'default',
'deletion': 'eliminación',
'department': 'departamento',
'description': 'descripción',
'descriptions': 'descripciones',
'desired': 'deseada/o',
'detail': 'detalle',
'disabled': 'deshabilitada/o',
'discount': 'descuento',
'discounts': 'descuentos',
'discriminate': 'discriminar',
'discriminated': 'discriminada/o',
'document': 'comprobante',
'document purchases': 'comprobante de compras',
'document sales': 'comprobante de ventas',
'does not update stock': 'no actualizar las existencias',
'down payment': 'down payment <translate>',
'draft': 'borrador',
'due date': 'fecha de vencimiento',
'due_date': 'fecha de vencimiento',
'email': 'email',
'ending': 'finaliza',
'ending quota': 'última cuota',
'enter a number between %(min)g and %(max)g': 'ingrese un número entre %(min)g y %(max)g',
'enter an integer between %(min)g and %(max)g': 'ingrese un entero entre %(min)g y %(max)g',
'enter from %(min)g to %(max)g characters': 'ingrese de %(min)g a %(max)g caracteres',
'entry': 'ingreso',
'exchanged': 'intercambiada/o',
'exit': 'salida',
'expenditure': 'gasto',
'extra': 'extra',
'extra hours': 'horas extra',
'extras': 'extras',
'failure': 'inasistencias',
'family': 'familia',
'fax': 'fax',
'fee': 'arancel',
'fees': 'aranceles',
'file': 'archivo',
'financials': 'financials',
'first due': 'primer vencimiento',
'first name': 'nombre',
'fiscal': 'fiscal',
'fiscal controller': 'Controlador fiscal',
'fixed': 'fija/o',
'floor': 'piso',
'form': 'formulario',
'format': 'formato',
'formula': 'fórmula',
'fund': 'fondo',
'government increase': 'aumento del gobierno',
'gross receipts': 'ingresos brutos',
'half bonus': 'medio aguinaldo',
'healthcare': 'obra social',
'hour': 'hora',
'hourly': 'horaria/o',
'id': 'id',
'id 1': 'id 1',
'id number': 'número de id',
'identity card': 'tarjeta identificatoria',
'index value': 'valor de índice',
'installment': 'plan de pago',
'interests': 'intereses',
'internal': 'interna/o',
'invert': 'invertir',
'invoice': 'factura',
'invoices': 'facturas',
'issue': 'issue <translate>',
'journal entry': 'libro diario',
'journalized': 'journalized <translate>',
'jurisdiction': 'jurisdicción',
'kinship': 'parentesco',
'labor union': 'sindicato',
'language': 'lenguaje',
'large family': 'familia numerosa',
'last name': 'apellido',
'late payment': 'pago con retraso',
'legal name': 'razón social',
'lines': 'líneas',
'liquidated': 'liquidado',
'liquidation': 'liquidación',
'lot': 'lote',
'marital status': 'estado civil',
'measure': 'unidad de medida',
'migration': 'migration',
'module': 'módulo',
'month': 'mes',
'monthly amount': 'importe mensual',
'movement': 'movimiento',
'msg': 'msg',
'multiple pages': 'múltiples páginas',
'name': 'nombre',
'nationality': 'nacionalidad',
'nationality id': 'id de nacionalidad',
'net': 'neto',
'next': 'próxima/o',
'not logged in': 'no autenticado',
'not updated': 'no actualizado',
'notes': 'notas',
'number': 'número',
'observations': 'observaciones',
'operation': 'operación',
'operation 1': 'operación 1',
'operation 2': 'operación 2',
'operations': 'operations',
'order number': 'número de orden',
'orderable': 'asignable a pedidos',
'orders': 'pedidos',
'other': 'otras/os',
'output': 'output',
'own': 'propia/o',
'packing slips': 'remitos',
'pages': 'páginas',
'paid': 'paga/o',
'paid quotas': 'cuotas pagas',
'paid vacation': 'vacaciones pagas',
'password': 'contraseña',
'patronal': 'patronal',
'payment': 'pago',
'payment method': 'payment method <translate>',
'payment terms': 'payment terms <translate>',
'payroll': 'payroll <translate>',
'pension': 'jubilación',
'per diem': 'per diem <translate>',
'percentage': 'porcentaje',
'place of delivery': 'lugar de entrega',
'plant': 'planta',
'point of sale': 'punto de venta',
'posted': 'hora/fecha de registro',
'preprinted': 'preimpreso',
'presentation': 'presentación',
'presenteesm': 'presentismo',
'presenteesm discount': 'descuento de presentismo',
'price': 'precio',
'price list': 'lista de precios',
'printed': 'impreso',
'printer': 'impresora',
'prints': 'imprime',
'priority': 'prioridad',
'processed': 'registrado',
'products': 'productos',
'profit percentage': 'porcentaje de ganancias',
'quantity': 'cantidad',
'quantity 1': 'cantidad 1',
'quantity 2': 'cantidad 2',
'queries': 'consultas',
'quota': 'cuota',
'quotas': 'cuotas',
'rate': 'rate <translate>',
'receipt': 'recibo',
'receipts': 'recibos',
'receives': 'recibe',
'registration': 'registration',
'registration key': 'clave de registro',
'rejection': 'rechazo',
'remunerative': 'remunerativa/o',
'repair': 'reparar',
'replica': 'replica',
'replicate': 'replicar',
'replicated': 'replicada/o',
'represent': 'represent',
'requires': 'requires',
'reserved': 'reservada/o',
'reset password key': 'clave para reconfigurar contraseña',
'retentions': 'retenciones',
'role': 'rol',
'salary': 'salario',
'salesperson': 'personal de ventas',
'schedule': 'agenda',
'schooling': 'escolaridad',
'scm': 'scm',
'scrap': 'scrap <translate>',
'second due': 'segundo vencimiento',
'seniority': 'antigüedad',
'seniority years': 'años de antigüedad',
'separate': 'separada/o',
'session.difference :%s': 'session.diferencia :%s',
'setup': 'setup',
'sex': 'sexo',
'sick days': 'inasistencia por enfermedad',
'situation': 'situación',
'size': 'tamaño',
'social services': 'social services <translate>',
'source': 'fuente',
'spouse': 'esposa',
'staff': 'personal',
'staff category': 'categoría de personal',
'starting': 'comienza',
'starting quota': 'cuota inicial',
'state': 'estado',
'statement': 'statement <translate>',
'stock': 'existencias',
'stock quantity': 'cantidad en existencia',
'street': 'calle',
'subcategory': 'subcategoría',
'subcustomer': 'cliente',
'subject': 'asunto',
'supplier': 'proveedor',
'surcharge': 'recargo',
'surcharges': 'recargos',
'suspended': 'suspendida/o',
'table number': 'número de tabla',
'tax': 'impuesto',
'tax identificar': 'identificar impuesto',
'tax identification': 'clave impositiva',
'taxed': 'gravada/o',
'telephone': 'teléfono',
'term': 'término',
'text': 'texto',
'ticket': 'ticket',
'times': 'times <translate>',
'transport': 'transporte',
'type': 'tipo',
'unitary': 'unitaria/o',
'units': 'unidades',
'updated': 'actualizado',
'updates stock': 'actualizar existencias',
'upper limit': 'límite superior',
'user': 'usuario',
'vacations': 'vacaciones',
'valuation': 'valuación',
'value': 'valor',
'value already in database or empty': 'valor en la base de datos o vacío',
'value not in database': 'value not in database',
'voided': 'anulado',
'voluntary': 'voluntaria/o',
'warehouse': 'depósito',
'year': 'año',
'zip code': 'código postal',
}
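# Hedged note (an addition; this file must remain a bare dict literal for
# web2py's translator to load it, so comments only): each pair backs T()
# lookups, e.g. T('Warehouse') renders 'Depósito' when the session language
# resolves to es-es, and keys whose value merely repeats the key are
# translations still pending.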
| reingart/gestionlibre.gui | languages/es-es.py | Python | agpl-3.0 | 26,342 | ["VisIt"] | ae849df93bc969ebbee60fd4e31f3e38d24334b0baa4c08e92b09f24caa9749c |
|
import re
from copy import deepcopy
import math as m
import numpy as nu
from scipy import stats, linalg
from matplotlib import pyplot, patches
from extreme_deconvolution import extreme_deconvolution
try:
from galpy.util import bovy_plot #Latest
except ImportError:
import bovy_plot
_SQRTTWOPI= -0.5*nu.log(2.*nu.pi) #despite the name, this is log(1/sqrt(2*pi)), the per-dimension Gaussian norm
def train(data,ngauss=2,init_xdtarget=None):
"""
NAME:
train
PURPOSE:
xd train from a data set
INPUT:
data - xddata instance
ngauss - number of Gaussians to use
init_xdtarget (optional) - initial xdtarget instance (amp, mean, covar)
OUTPUT:
xdtarget instance
HISTORY:
2010-08-09 - Written - Bovy (NYU)
"""
#Initialize
if init_xdtarget is None:
initamp= nu.array([1./ngauss for ii in range(ngauss)])
datameans= nu.zeros(data.da)
datastddevs= nu.zeros(data.da)
for ii in range(data.da):
mask= (nu.isnan(data.a[:,ii]))+(nu.isinf(data.a[:,ii])) #logical or: '*' (and) was always False, letting NaNs into the means
mask= nu.array([not m for m in mask])
datameans[ii]= nu.mean(data.a[mask,ii])
datastddevs[ii]= nu.std(data.a[mask,ii])
initmean= nu.zeros((ngauss,data.da))
initcovar= nu.zeros((ngauss,data.da,data.da))
for kk in range(ngauss):
for ii in range(data.da):
initmean[kk,ii]= datameans[ii]+(2.*stats.uniform.rvs()-1.)*\
datastddevs[ii]
initcovar[kk,ii,ii]= datastddevs[ii]**2.
init_xdtarget= xdtarget(amp=initamp,mean=initmean,covar=initcovar)
#Run XD
return xd(data,init_xdtarget)
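# Hedged usage sketch (an addition, kept in comments because xddata/xdtarget
# are defined further down this module and the extreme_deconvolution C
# extension must be installed):
#   data= xddata(a=nu.random.randn(1000,2),
#                acov=0.01*nu.ones((1000,2))) #diagonal uncertainties
#   fit= train(data,ngauss=2)
#   logp= fit(data)         #log-probabilities under the fitted mixture
#   draws= fit.sample(nsample=500)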
def xd(data,init_xdtarget):
initamp= init_xdtarget.amp
initmean= init_xdtarget.mean
initcovar= init_xdtarget.covar
ydata= data.a
ycovar= data.acov
if hasattr(data,'weight'):
weight= data.weight
else:
weight= None
if hasattr(data,'logweight'):
logweight= data.logweight
else:
logweight= False
extreme_deconvolution(ydata,ycovar,initamp,initmean,initcovar,
weight=weight,logweight=logweight)
out_xdtarget= xdtarget(amp=initamp,mean=initmean,covar=initcovar)
return out_xdtarget
class xdtarget:
"""class that holds the XD solution and can be used to calculate target
probabilities"""
def __init__(self,*args,**kwargs):
if len(args) > 0: #load from file
tmp_ext= re.split('\.',args[0])[-1]
if tmp_ext == 'sav': #pickle
import pickle
file= open(args[0],'rb')
tmp_self= pickle.load(file)
file.close()
self.amp= tmp_self.amp
self.mean= tmp_self.mean
self.covar= tmp_self.covar
else:
self.amp= kwargs['amp']
self.mean= kwargs['mean']
self.covar= kwargs['covar']
self.ngauss= len(self.amp)
def __call__(self,*args):
"""
NAME:
__call__
PURPOSE:
evaluate the log-probability of the input under the density model
INPUT:
Either:
1) xddata object
2) a, acov
OUTPUT:
array of log-probabilities
HISTORY:
2010-08-09 - Written - Bovy (NYU)
"""
if isinstance(args[0],xddata):
return self._eval(args[0].a,args[0].acov)
else:
return self._eval(args[0],args[1])
def sample(self,nsample=1):
"""
NAME:
sample
PURPOSE:
sample from the density
INPUT:
nsample - number of samples
OUTPUT:
array [ndata,da] of samples
HISTORY:
2010-08-09 - Written - Bovy (NYU)
"""
#First assign the samples to Gaussians
cumamp= nu.cumsum(self.amp)
comp= nu.zeros(nsample).astype('int')
for ii in range(nsample):
gauss= stats.uniform.rvs()
jj= 0
while (gauss > cumamp[jj]):
jj+= 1
comp[ii]= jj
out= []
for c in set(list(comp)):
thiscomp= comp[comp == c]
thisn= len(thiscomp)
out.extend(_sample_normal(self.mean[c,:],self.covar[c,:,:],
nsamples=thisn))
self.samples= nu.array(out).reshape((nsample,self.mean.shape[1]))
return self.samples
def scatterplot(self,d1,d2,*args,**kwargs):
"""
NAME:
scatterplot
PURPOSE:
make a scatterplot of the samples
INPUT:
d1, d2 - x and y dimension to plot
hoggscatter - if True, hogg_scatterplot
+bovy_plot.plot or bovy_plot.scatterplot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2010-08-09 - Written - Bovy (NYU)
"""
if kwargs.has_key('hoggscatter'):
hoggscatter= kwargs['hoggscatter']
kwargs.pop('hoggscatter')
else:
hoggscatter= False
if not kwargs.has_key('xlabel'):
kwargs['xlabel']= str(d1)
if not kwargs.has_key('ylabel'):
kwargs['ylabel']= str(d2)
if hoggscatter:
bovy_plot.scatterplot(self.samples[:,d1],self.samples[:,d2],
*args,**kwargs)
else:
bovy_plot.bovy_plot(self.samples[:,d1],self.samples[:,d2],
*args,**kwargs)
def plot(self,d1,d2,*args,**kwargs):
"""
NAME:
plot
PURPOSE:
make a plot of the solution
INPUT:
d1, d2 - x and y dimension to plot
dens - make density plot
xrange, yrange
npix, npix_x, npix_y
OUTPUT:
plot to output device
HISTORY:
2010-08-16 - Written - Bovy (NYU)
"""
if kwargs.has_key('dens') and kwargs['dens']:
dens= True
kwargs.pop('dens')
else:
dens= False
if not kwargs.has_key('xlabel'):
kwargs['xlabel']= str(d1)
if not kwargs.has_key('ylabel'):
kwargs['ylabel']= str(d2)
if not dens:
#Create the ellipses for the Gaussians
x= nu.zeros(self.ngauss)
y= nu.zeros(self.ngauss)
ellipses=[]
ymin, ymax= self.mean[0,d1], self.mean[0,d1]
xmin, xmax= self.mean[0,d2], self.mean[0,d2]
for ii in range(self.ngauss):
x[ii]= self.mean[ii,d1]
y[ii]= self.mean[ii,d2]
#Calculate the eigenvalues and the rotation angle
ycovar= nu.zeros((2,2))
ycovar[0,0]= self.covar[ii,d1,d1]
ycovar[1,1]= self.covar[ii,d2,d2]
ycovar[0,1]= self.covar[ii,d1,d2]
ycovar[1,0]= ycovar[0,1]
eigs= linalg.eig(ycovar)
angle= m.atan(-eigs[1][0,1]/eigs[1][1,1])/m.pi*180.
thisellipse= patches.Ellipse(nu.array([x[ii],y[ii]]),
2*nu.sqrt(eigs[0][0]),
2*nu.sqrt(eigs[0][1]),angle)
ellipses.append(thisellipse)
if (x[ii]+m.sqrt(ycovar[0,0])) > xmax:
xmax= (x[ii]+m.sqrt(ycovar[0,0]))
if (x[ii]-m.sqrt(ycovar[0,0])) < xmin:
xmin= (x[ii]-m.sqrt(ycovar[0,0]))
if (y[ii]+m.sqrt(ycovar[1,1])) > ymax:
ymax= (y[ii]+m.sqrt(ycovar[1,1]))
if (y[ii]-m.sqrt(ycovar[1,1])) < ymin:
ymin= (y[ii]-m.sqrt(ycovar[1,1]))
fig= pyplot.figure()
ax= fig.add_subplot(111)
for e in ellipses:
ax.add_artist(e)
e.set_facecolor('none')
ax.set_xlabel(kwargs['xlabel'])
ax.set_ylabel(kwargs['ylabel'])
if not kwargs.has_key('xrange'):
ax.set_xlim((xmin,xmax))
else:
ax.set_xlim((kwargs['xrange'][0],kwargs['xrange'][1]))
if not kwargs.has_key('yrange'):
ax.set_ylim((ymin,ymax))
else:
ax.set_ylim((kwargs['yrange'][0],kwargs['yrange'][1]))
else:
#Create the ellipses for the Gaussians, to determine range
x= nu.zeros(self.ngauss)
y= nu.zeros(self.ngauss)
ellipses=[]
ymin, ymax= self.mean[0,d1], self.mean[0,d1]
xmin, xmax= self.mean[0,d2], self.mean[0,d2]
for ii in range(self.ngauss):
x[ii]= self.mean[ii,d1]
y[ii]= self.mean[ii,d2]
#Calculate the eigenvalues and the rotation angle
ycovar= nu.zeros((2,2))
ycovar[0,0]= self.covar[ii,d1,d1]
ycovar[1,1]= self.covar[ii,d2,d2]
ycovar[0,1]= self.covar[ii,d1,d2]
ycovar[1,0]= ycovar[0,1]
if (x[ii]+3.*m.sqrt(ycovar[0,0])) > xmax:
xmax= (x[ii]+3.*m.sqrt(ycovar[0,0]))
if (x[ii]-3.*m.sqrt(ycovar[0,0])) < xmin:
xmin= (x[ii]-3.*m.sqrt(ycovar[0,0]))
if (y[ii]+3.*m.sqrt(ycovar[1,1])) > ymax:
ymax= (y[ii]+3.*m.sqrt(ycovar[1,1]))
if (y[ii]-3.*m.sqrt(ycovar[1,1])) < ymin:
ymin= (y[ii]-3.*m.sqrt(ycovar[1,1]))
#Get range
if not kwargs.has_key('xrange'):
kwargs['xrange']= [xmin,xmax]
if not kwargs.has_key('yrange'):
kwargs['yrange']= [ymin,ymax]
xrange= kwargs['xrange']
yrange= kwargs['yrange']
if not kwargs.has_key('npix') and not kwargs.has_key('npix_x'):
npix_x= 101
elif kwargs.has_key('npix_x'):
npix_x= kwargs['npix_x']
kwargs.pop('npix_x')
elif kwargs.has_key('npix'):
npix_x= kwargs['npix']
if not kwargs.has_key('npix') and not kwargs.has_key('npix_y'):
npix_y= 101
elif kwargs.has_key('npix_y'):
npix_y= kwargs['npix_y']
kwargs.pop('npix_y')
elif kwargs.has_key('npix'):
npix_y= kwargs['npix']
kwargs.pop('npix')
if kwargs.has_key('npix'):
kwargs.pop('npix')
#compute density
dens= nu.zeros((npix_x,npix_y))
xs= nu.linspace(xrange[0],xrange[1],npix_x)
ys= nu.linspace(yrange[0],yrange[1],npix_y)
means= nu.zeros((len(self.amp),2))
covars= nu.zeros((len(self.amp),2,2))
for kk in range(len(self.amp)):
means[kk,0]= self.mean[kk,d1]
means[kk,1]= self.mean[kk,d2]
covars[kk,0,0]= self.covar[kk,d1,d1]
covars[kk,1,0]= self.covar[kk,d1,d2]
covars[kk,0,1]= self.covar[kk,d2,d1]
covars[kk,1,1]= self.covar[kk,d2,d2]
thisxd= xdtarget(amp=self.amp,
mean= means,
covar=covars)
for ii in range(npix_x):
for jj in range(npix_y):
dens[ii,jj]= thisxd(nu.array([xs[ii],ys[jj]]).reshape((1,2)),
nu.zeros((2,2)))
dens= nu.exp(dens)
bovy_plot.bovy_dens2d(dens.T,origin='lower',cmap='gist_yarg',
**kwargs)
def save(self,filename):
"""
NAME:
save
PURPOSE:
save the xdtarget object to a file
INPUT:
filename - name of the file to save the object to
OUTPUT:
none
HISTORY:
2010-08-10 - Written - Bovy (NYU)
"""
tmp_ext= re.split('\.',filename)[-1]
if tmp_ext == 'sav': #pickle
import pickle
file= open(filename,'wb')
pickle.dump(self,file)
file.close()
def _eval(self,a,acov):
ndata= a.shape[0]
da= a.shape[1]
if len(a.shape) == len(acov.shape):
diagcovar= True
else:
diagcovar= False
out= nu.zeros(ndata)
loglike= nu.zeros(self.ngauss)
for ii in range(ndata):
for kk in range(self.ngauss):
if self.amp[kk] == 0.:
loglike[kk]= nu.finfo(nu.dtype(nu.float64)).min
continue
if diagcovar:
tinv= linalg.inv(self.covar[kk,:,:]+nu.diag(acov[ii,:]))
else:
tinv= linalg.inv(self.covar[kk,:,:]+acov[ii,:,:])
delta= a[ii,:]-self.mean[kk,:]
loglike[kk]= nu.log(self.amp[kk])+0.5*nu.log(linalg.det(tinv))\
-0.5*nu.dot(delta,nu.dot(tinv,delta))+\
da*_SQRTTWOPI
out[ii]= _logsum(loglike)
return out
class xddata:
"""Class that holds the training data
Initialize with filename (atag, acovtag) or arrays a and acov
a = [ndata,da]
acov= [ndata,da(,da)] (if diagonal 2D)
weight=, useweights=, wtag
alltags=True
"""
def __init__(self,**kwargs):
if kwargs.has_key('filename'):
tmp_ext= re.split('\.',kwargs['filename'])[-1]
if tmp_ext == 'gz':
tmp_ext= re.split('\.',kwargs['filename'])[-2]+'.'+tmp_ext
if tmp_ext == 'fit' or tmp_ext == 'fits' or \
tmp_ext == 'fit.gz' or tmp_ext == 'fits.gz':
if kwargs.has_key('atag'):
atag= kwargs['atag']
else:
atag= 'a'
if kwargs.has_key('acovtag'):
acovtag= kwargs['acovtag']
else:
acovtag= 'acov'
if kwargs.has_key('wtag'):
wtag= kwargs['wtag']
else:
wtag= 'weight'
import pyfits
hdulist= pyfits.open(kwargs['filename'])
tbdata= hdulist[1].data
self.a= nu.array(tbdata.field(atag)).astype('float64')
if acovtag.lower() in [name.lower() for name in hdulist[1].columns.names]:
self.acov= nu.array(tbdata.field(acovtag)).astype('float64')
if self.acov.shape[1] != self.a.shape[1]:
self.acov= nu.reshape(self.acov,(self.a.shape[0],self.a.shape[1],self.a.shape[1]))
else:
self.acov= nu.zeros(self.a.shape)
if kwargs.has_key('useweights') and kwargs['useweights']:
self.weight= nu.array(tbdata.field(wtag)).astype('float64')
if kwargs.has_key('alltags') and kwargs['alltags']:
tags= hdulist[1].columns.names
tmp_tags= deepcopy(tags)
popped= 0
for ii in range(len(tags)):
if tags[ii].lower() == atag.lower() or \
tags[ii].lower() == acovtag.lower():
tmp_tags.pop(ii-popped)
popped+= 1
if kwargs.has_key('useweights') and kwargs['useweights'] and tags[ii].lower() == wtag.lower():
tmp_tags.pop(ii)
tags= tmp_tags
for tag in tags:
self.__dict__[tag.lower()]= tbdata.field(tag)
self._alltags= True
self._tags= [tag.lower() for tag in tags]
else:
self._alltags= False
elif kwargs.has_key('a'):
self.a= kwargs['a']
if kwargs.has_key('acov'):
self.acov= kwargs['acov']
else:
self.acov= nu.zeros(self.a.shape)
if kwargs.has_key('weight'):
self.weight= kwargs['weight']
self._alltags= False
self._tags= None
self.da= self.a.shape[1]
def __getitem__(self,key):
if not isinstance(key,slice):
nkey= 1
else:
nkey= len(self.a[key,0])
if len(self.acov.shape) == 2:
acov= self.acov[key,:]
dacov= (nkey,self.da)
else:
acov= self.acov[key,:,:]
dacov= (nkey,self.da,self.da)
if hasattr(self,'weight'):
out= xddata(a=nu.reshape(self.a[key,:],(nkey,self.da)),
acov=nu.reshape(acov,dacov),
weight=self.weight[key])
else:
out= xddata(a=nu.reshape(self.a[key,:],(nkey,self.da)),
acov=nu.reshape(acov,dacov))
#Also transfer tags
if self._alltags:
for tag in self._tags:
thisshape= self.__dict__[tag].shape
thistag= nu.reshape(self.__dict__[tag],(thisshape[0],nu.prod(thisshape)/thisshape[0]))
tmptag= thistag[key,:]
outshape=[nkey]
nshape= len(list(thisshape))
thisshape= [thisshape[ii] for ii in range(nshape)
if ii != 0]
outshape.extend([s for s in thisshape])
outshape= tuple(outshape)
out.__dict__[tag]= nu.reshape(tmptag,outshape)
out._alltags= self._alltags
out._tags= self._tags
return out
def scatterplot(self,d1,d2,*args,**kwargs):
"""
NAME:
scatterplot
PURPOSE:
make a scatterplot of the data
INPUT:
d1, d2 - x and y dimension to plot
hoggscatter - if True, hogg_scatterplot
+bovy_plot.plot or bovy_plot.scatterplot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2010-08-09 - Written - Bovy (NYU)
"""
if kwargs.has_key('hoggscatter'):
hoggscatter= kwargs['hoggscatter']
kwargs.pop('hoggscatter')
else:
hoggscatter= False
if not kwargs.has_key('xlabel'):
kwargs['xlabel']= str(d1)
if not kwargs.has_key('ylabel'):
kwargs['ylabel']= str(d2)
if hoggscatter:
if hasattr(self,'weight'):
kwargs['weights']= self.weight
bovy_plot.scatterplot(self.a[:,d1],self.a[:,d2],
*args,**kwargs)
else:
bovy_plot.bovy_plot(self.a[:,d1],self.a[:,d2],
*args,**kwargs)
def _logsum(array):
"""
NAME:
_logsum
PURPOSE:
calculate the logarithm of the sum of an array of numbers,
given as a set of logs
INPUT:
array - logarithms of the numbers to be summed
OUTPUT:
logarithm of the sum of the exp of the numbers in array
REVISION HISTORY:
2009-09-29 -Written - Bovy (NYU)
"""
#For now Press' log-sum-exp because I am too lazy to implement
#my own algorithm for this
array= nu.array(array)
c= nu.amax(array)
return nu.log(nu.nansum(nu.exp(nu.add(array,-c))))+c
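# e.g. _logsum(nu.log(nu.array([1.,2.,3.]))) == nu.log(6.) to floating point,
# and the max-shift keeps the sum finite even when every entry is far below
# the log of the smallest representable float (an added note).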
def _sample_normal(mean,covar,nsamples=1):
"""sample_normal: Sample a d-dimensional Gaussian distribution with
mean and covar.
Input:
mean - the mean of the Gaussian
covar - the covariance of the Gaussian
nsamples - (optional) the number of samples desired
Output:
samples; if nsamples != 1 then a list is returned
History:
2009-05-20 - Written - Bovy (NYU)
"""
p= covar.shape[0]
#First lower Cholesky of covar
L= linalg.cholesky(covar,lower=True)
if nsamples > 1:
out= []
for kk in range(nsamples):
#Generate a vector in which the elements ~N(0,1)
y= nu.zeros(p)
for ii in range(p):
y[ii]= stats.norm.rvs()
#Form the sample as Ly+mean
thissample= nu.dot(L,y)+mean
if nsamples == 1:
return thissample
else:
out.append(thissample)
return out
| jobovy/xdtarget | py/xdtarget.py | Python | bsd-3-clause | 20,520 | ["Gaussian"] | 64ae67d864c2dc7da9363d8f2779c90ee1cefee4ef870b978c53ad83264f5979 |
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
from boto.utils import compute_hash
from boto.utils import get_utf8_value
class Key(S3Key):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`boto.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
super(Key, self).__init__(bucket=bucket, name=name)
self.generation = generation
self.metageneration = None  # was meta_generation, which __repr__ below never reads
self.cloud_hashes = {}
self.component_count = None
def __repr__(self):
if self.generation and self.metageneration:
ver_str = '#%s.%s' % (self.generation, self.metageneration)
else:
ver_str = ''
if self.bucket:
return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
else:
return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
self.metageneration = value
else:
setattr(self, name, value)
def handle_version_headers(self, resp, force=False):
self.metageneration = resp.getheader('x-goog-metageneration', None)
self.generation = resp.getheader('x-goog-generation', None)
def handle_addl_headers(self, headers):
for key, value in headers:
if key == 'x-goog-hash':
for hash_pair in value.split(','):
alg, b64_digest = hash_pair.strip().split('=', 1)
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
elif key == 'x-goog-generation':
self.generation = value
# Use x-goog-stored-content-encoding and
# x-goog-stored-content-length to indicate original content length
# and encoding, which are transcoding-invariant (so are preferable
# over using content-encoding and size headers).
elif key == 'x-goog-stored-content-encoding':
self.content_encoding = value
elif key == 'x-goog-stored-content-length':
self.size = int(value)
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
# For GCS we need to include the object generation in the query args.
# The rest of the processing is handled in the parent class.
if self.generation:
if query_args:
query_args += '&'
query_args += 'generation=%s' % self.generation
super(Key, self).open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries,
response_headers=response_headers)
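# Hedged illustration (an added note; the generation value is made up): a key
# with generation='1360887697105000' issues its GET with a query string
# containing 'generation=1360887697105000', so versioned reads stay pinned to
# a single object generation.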
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None):
query_args = None
if self.generation:
query_args = ['generation=%s' % self.generation]
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=hash_algs,
query_args=query_args)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None,
hash_algs=None):
"""
Retrieve an object from GCS using the name of the Key object as the
key in GCS. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: file-like object
:param fp: the file object to write the object's contents to
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to GCS and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
        if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
hash_algs=hash_algs)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers,
hash_algs=hash_algs)
def compute_hash(self, fp, algorithm, size=None):
"""
:type fp: file
:param fp: File pointer to the file to hash. The file
pointer will be reset to the same position before the
method returns.
:type algorithm: zero-argument constructor for hash objects that
implements update() and digest() (e.g. hashlib.md5)
:type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_hash(
fp, size=size, hash_algorithm=algorithm)
# The internal implementation of compute_hash() needs to return the
# data size, but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code), so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
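    # Usage sketch (an added illustration): computing an MD5 over an open file
    # with the method above. hashlib.md5 satisfies the "zero-argument
    # constructor" contract; the file name is hypothetical.
    #
    #   import hashlib
    #   with open('payload.bin', 'rb') as fp:
    #       hex_digest, b64_digest = key.compute_hash(fp, hashlib.md5)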
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
"""
Upload a file to GCS.
:type fp: file
        :param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload,
            i.e., if uploading the full file, it should point at the
            start of the file. Normally when a file is opened for
            reading, the fp will point at the first byte. See the
            size parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the file
            up into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib.md5}.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size,
hash_algs=hash_algs)
def delete(self, headers=None):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation,
headers=headers)
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
"""
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
"""
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
"""
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
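    # Example (added; the group address is a placeholder): granting read
    # access to a Google Group via the add_group_email_grant convenience
    # method above.
    #
    #   key.add_group_email_grant('READ', 'my-team@googlegroups.com')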
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None, rewind=False,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
        :param md5: If you have already computed the MD5 prior to upload,
            there is no need to compute it twice, so this param, if present,
            will be used as the MD5 values of the file. Otherwise, the
            checksum will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
from the file pointer. Less bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will be
rewound to the start before any bytes are read from
it. The default behaviour is False which reads from
the current position of the file pointer (fp).
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
:rtype: int
:return: The number of bytes written to the key.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
if res_upload_handler and size:
            # The "size" param cannot currently be combined with a
            # resumable upload handler (see the note in the docstring).
raise BotoClientError(
'"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if hasattr(fp, 'name'):
self.path = fp.name
        if self.bucket is not None:
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if size:
self.size = size
else:
                # Even if md5 is provided, we still need the size, so
                # calculate it based on the bytes from here to end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
            if md5 is None:
md5 = self.compute_md5(fp, size)
self.md5 = md5[0]
self.base64md5 = md5[1]
            if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size)
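    # Sketch (hedged) of the if_generation precondition described above:
    # passing 0 turns the PUT into a create-if-absent. The file name is
    # hypothetical, and the failed precondition surfaces as a storage
    # response error.
    #
    #   with open('report.csv', 'rb') as fp:
    #       try:
    #           key.set_contents_from_file(fp, if_generation=0)
    #       except boto.exception.GSResponseError:
    #           pass  # the object already existed; precondition failed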
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=None,
res_upload_handler=None,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto GS
:type headers: dict
:param headers: Additional headers to pass along with the request to GS.
:type replace: bool
:param replace: If True, replaces the contents of the file if it
already exists.
:type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the upload. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to GS and the second representing
            the total number of bytes that need to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
        :param md5: If you have already computed the MD5 prior to upload,
            there is no need to compute it twice, so this param, if present,
            will be used as the MD5 values of the file. Otherwise, the
            checksum will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed hashes, since we are setting the
# content.
self.local_hashes = {}
with open(filename, 'rb') as fp:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler,
if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
if_generation=None):
"""
Store an object in GCS using the name of the Key object as the
key in GCS and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
        :param headers: Additional headers to pass along with the
            request to GCS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to GCS and the second representing the
            total size of the object to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in GCS.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
        :param md5: If you have already computed the MD5 prior
            to upload, there is no need to compute it twice, so
            this param, if present, will be used as the MD5 values
            of the file. Otherwise, the checksum will be computed.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed md5 hashes, since we are setting the content.
self.md5 = None
self.base64md5 = None
fp = StringIO.StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
fp.close()
return r
def set_contents_from_stream(self, *args, **kwargs):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
        The stream object is not seekable and the total size is not known.
        This means we cannot specify the Content-Length and Content-MD5
        headers up front. So for huge uploads the delay in calculating
        MD5 is avoided, at the cost of being unable to verify the
        integrity of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
if_generation = kwargs.pop('if_generation', None)
if if_generation is not None:
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets the ACL for this object.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
        if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
"""Returns the ACL of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: :class:`.gs.acl.ACL`
"""
        if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
def get_xml_acl(self, headers=None, generation=None):
"""Returns the ACL string of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: str
"""
        if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
def set_xml_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
        if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
        if self.bucket is not None:
return self.bucket.set_canned_acl(
acl_str,
self.name,
headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration
)
def compose(self, components, content_type=None, headers=None):
"""Create a new object from a sequence of existing objects.
The content of the object representing this Key will be the
concatenation of the given object sequence. For more detail, visit
https://developers.google.com/storage/docs/composite-objects
        :type components: list of Keys
        :param components: List of gs.Keys representing the component objects
        :type content_type: string
        :param content_type: (optional) Content type for the new composite object.
"""
compose_req = []
for key in components:
if key.bucket.name != self.bucket.name:
raise BotoClientError(
'GCS does not support inter-bucket composing')
generation_tag = ''
if key.generation:
generation_tag = ('<Generation>%s</Generation>'
% str(key.generation))
compose_req.append('<Component><Name>%s</Name>%s</Component>' %
(key.name, generation_tag))
compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
''.join(compose_req))
headers = headers or {}
if content_type:
headers['Content-Type'] = content_type
resp = self.bucket.connection.make_request(
'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
headers=headers, query_args='compose',
data=get_utf8_value(compose_req_xml))
if resp.status < 200 or resp.status > 299:
raise self.bucket.connection.provider.storage_response_error(
resp.status, resp.reason, resp.read())
# Return the generation so that the result URI can be built with this
# for automatic parallel uploads.
return resp.getheader('x-goog-generation')
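    # Usage sketch (added; bucket/object names are hypothetical): composing
    # two previously uploaded parts and capturing the resulting generation.
    #
    #   parts = [bucket.get_key('part-0'), bucket.get_key('part-1')]
    #   combined = bucket.new_key('combined')
    #   generation = combined.compose(parts, content_type='text/plain')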
|
rjschwei/boto
|
boto/gs/key.py
|
Python
|
mit
| 42,387
|
[
"VisIt"
] |
65b7bd464e91c12f6a8915e9204ceab91bd0ff79e40dc61afaffd44a88aaf806
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 6 11:50:10 2017
@author: Tristan Mackenzie
QNMR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
QNMR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QNMR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy.ndimage as ndi
import scipy.interpolate as interpolate
import sys
if sys.version_info[0] == 3:
import tkinter as tk
import tkinter.messagebox as msgbox
from tkinter.filedialog import askopenfilename, asksaveasfilename
else:
import Tkinter as tk
import tkMessageBox as msgbox
from tkFileDialog import askopenfilename, asksaveasfilename
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib import pyplot as plt
class GUI():
def __init__(self):
self.root = tk.Tk()
self.root.title("NMR GUI")
self.root.resizable(0,0)
sunken = dict(height = 2, bd = 1, relief = "sunken")
self.figure = plt.figure(figsize = (10, 5))
self.ax = self.figure.add_subplot(111)
self.ax.invert_xaxis()
self.peak = None
self.peaks = []
self.splitting = None
self.splittings = []
fs = self.frames = {}
cs = self.canvases = {}
ls = self.labels = {}
mes = self.machine_entries = {}
pes = self.peak_entries = {}
ses = self.splitting_entries = {}
bs = self.buttons = {}
ms = self.optionmenus = {}
fs ["machine"] = _add_frame(dict(master=self.root, text="Machine", **sunken), gk('000055news'))
ls ["frequency"] = _add_label(fs["machine"], {"text": "Operating Frequency (MHz):"}, gk('00w'))
mes["machine_frequency_mhz"] = _add_entry(fs["machine"], "", {}, gk('010300'))
ls ["noise"] = _add_label(fs["machine"], {"text": "Noise:"}, gk('10w'))
mes["noise"] = _add_entry(fs["machine"], "", {}, gk('110300'))
ls ["resolution"] = _add_label(fs["machine"], {"text": "Resolution (ppm):"}, gk('20w'))
mes["resolution"] = _add_entry(fs["machine"], "", {}, gk('210300'))
ls ["min_x"] = _add_label(fs["machine"], {"text": "Range (ppm):"}, gk('30w'))
mes["min_x"] = _add_entry(fs["machine"], "", {}, gk('31w'), {"width": 3})
ls ["max_x"] = _add_label(fs["machine"], {"text": "to:"}, gk('32w'))
mes["max_x"] = _add_entry(fs["machine"], "", {}, gk('33w'), {"width": 3})
fs["peaks"] = _add_frame(dict(master=self.root, text="Peaks", **sunken), gk('100055news'))
bs["add_peak"] = _add_button(fs["peaks"], {"text": "Add Peak"}, gk('000055w'), {"<Button-1>": self.add_peak})
bs["remove_peak"] = _add_button(fs["peaks"], {"text": "Remove Peak"}, gk('010055w'), {"<Button-1>": self.remove_peak})
ls["peaks"] = _add_label(fs["peaks"], {"text": "Peaks:"}, gk('10w'))
ms["peaks"], self.peak_string = _add_optionmenu(fs["peaks"], " ", [" "], {"command": self._update_peak_om}, gk('1103ew'), {"width": 10})
ls ["shift"] = _add_label(fs["peaks"], {"text": "Shift (ppm):"}, gk('20w'))
pes["shift"] = _add_entry(fs["peaks"], "", {}, gk('21w'), attach_func=self._set_peak_string)
ls ["p_nuclei"] = _add_label(fs["peaks"], {"text": "Nuclei:"}, gk('22w'))
pes["nuclei"] = _add_entry(fs["peaks"], "", {}, gk('23w'), attach_func=self._set_peak_string)
ls ["hwhm"] = _add_label(fs["peaks"], {"text": "Half Width Half Maximum (ppm):"}, gk('3003w'))
pes["hwhm"] = _add_entry(fs["peaks"], "", {}, gk('33w'), attach_func=self._set_peak_string)
fs["splittings"] = _add_frame(dict(master=fs["peaks"], text="Splitting Nuclei", **sunken), gk('400455news'))
bs["add_splitting"] = _add_button(fs["splittings"], {"text": "Add Splitting"}, gk('000055w'), {"<Button-1>": self.add_splitting})
bs["remove_splitting"] = _add_button(fs["splittings"], {"text": "Remove Splitting"}, gk('010055w'), {"<Button-1>": self.remove_splitting})
ls["splittings"] = _add_label(fs["splittings"], {"text": "Splittings:"}, gk('10w'))
ms["splittings"], self.splitting_string = _add_optionmenu(fs["splittings"], " ", [" "], {}, gk('1103ew'), {"width": 10})
ls ["coupling"] = _add_label(fs["splittings"], {"text": "J Coupling (Hz):"}, gk('20w'))
ses["coupling"] = _add_entry(fs["splittings"], "", {}, gk('21w'), attach_func=self._set_splitting_string)
ls ["s_nuclei"] = _add_label(fs["splittings"], {"text": "Nuclei:"}, gk('22w'))
ses["nuclei"] = _add_entry(fs["splittings"], "", {}, gk('23w'), attach_func=self._set_splitting_string)
ls ["spin"] = _add_label(fs["splittings"], {"text": "Spin:"}, gk('30w'))
ses["spin"] = _add_entry(fs["splittings"], "", {}, gk('31w'), attach_func=self._set_splitting_string)
ls ["abundance"] = _add_label(fs["splittings"], {"text": "Abundance:"}, gk('32w'))
ses["abundance"] = _add_entry(fs["splittings"], "", {}, gk('33w'), attach_func=self._set_splitting_string)
fs["controls"] = _add_frame(dict(master=self.root, text="Controls", **sunken), gk('200055news'))
bs["update"] = _add_button(fs["controls"], {"text": "Update"}, gk('000055ew') ,{"<Button-1>": self.update})
bs["parse"] = _add_button(fs["controls"], {"text": "From .log"}, gk('010055ew') ,{"<Button-1>": self.parse})
bs["export"] = _add_button(fs["controls"], {"text": "Export Data"}, gk('020055ew') ,{"<Button-1>": self.export})
fs["plot"] = _add_frame(dict(master=self.root, text="Plot", **sunken), gk('012055news'))
cs["plot"] = _add_mpl_canvas(fs["plot"], self.figure, gk('00'))
cs["plot"].get_tk_widget().grid(row=0, column=0)
fs["toolbar"] = _add_frame(dict(master=self.root, text="", **sunken), gk('210055news'))
self.toolbar = NavigationToolbar2TkAgg(cs["plot"], fs["toolbar"])
self.toolbar.grid(row=0, column=0)
self._add_nmr()
self._add_nmr_parser()
self.root.protocol("WM_DELETE_WINDOW", self._cl)
self.root.mainloop()
def _add_nmr(self, *args):
self.nmr = NMR()
for key, entry in self.machine_entries.items():
entry.configure(textvariable=getattr(self.nmr, key), state=tk.NORMAL)
def _add_nmr_parser(self, *args):
self.nmr_parser = NMRParser()
self.nmr_parser._ask_spin_abundance = self._ask_spin_abundance
def _cl(self):
plt.close('all')
self.root.destroy()
def _set_peak(self, peak, *args):
self.peak = peak
self._peak_changed()
try:
splitting = self.peak.splittings[0]
except (IndexError, AttributeError):
splitting = None
self.splittings = self.peak.splittings
self._set_splitting(splitting)
def _set_peak_string(self, *args):
self.peak_string.set(repr(self.peak))
self._update_peak_om()
def _peak_changed(self, *args):
self._set_peak_string()
self._update_peak_entries()
def _update_peak_om(self):
om = self.optionmenus["peaks"]
menu = om['menu']
menu.delete(0, tk.END)
for peak in self.peaks:
string = repr(peak)
menu.add_command(label = string, command = lambda value=peak: self._set_peak(value))
def add_peak(self, *args):
peak = Peak(self.nmr, 1, 7)
self.peaks.append(peak)
self.nmr.peaks.append(peak)
self._set_peak(peak)
def remove_peak(self, *args):
peak = self.peak
self.nmr.peaks.remove(peak)
self.peaks.remove(peak)
try:
peak = self.peaks[0]
except IndexError:
peak = None
self._set_peak(peak)
def _update_peak_entries(self, *args):
peak = self.peak
if peak:
for key, entry in self.peak_entries.items():
entry.configure(textvariable=getattr(peak, key), state=tk.NORMAL)
else:
for key, entry in self.peak_entries.items():
entry.configure(textvariable=tk.StringVar(value=""), state=tk.DISABLED)
def _set_splitting(self, splitting, *args):
self.splitting = splitting
self._splitting_changed()
def _set_splitting_string(self, *args):
self.splitting_string.set(repr(self.splitting))
self._update_splitting_om()
def _splitting_changed(self, *args):
self._set_splitting_string()
self._update_splitting_entries()
def _update_splitting_om(self):
om = self.optionmenus["splittings"]
menu = om['menu']
menu.delete(0, tk.END)
for splitting in self.splittings:
string = repr(splitting)
menu.add_command(label = string, command = lambda value=splitting: self._set_splitting(value))
def add_splitting(self, *args):
splitting = Splitting(0.5, 1, 20, 1)
self.splittings.append(splitting)
self.peak.splittings.append(splitting)
self._set_splitting(splitting)
def remove_splitting(self, *args):
s0 = self.splitting
for i, s1 in enumerate(self.peak.splittings):
if s0 == s1:
del self.peak.splittings[i]
break
for i, s1 in enumerate(self.splittings):
if s0 == s1:
                del self.splittings[i]
break
try:
splitting = self.splittings[0]
except IndexError:
splitting = None
self._set_splitting(splitting)
def _update_splitting_entries(self, *args):
splitting = self.splitting
if splitting:
for key, entry in self.splitting_entries.items():
entry.configure(textvariable=getattr(splitting, key), state=tk.NORMAL)
else:
for key, entry in self.splitting_entries.items():
entry.configure(textvariable=tk.StringVar(value=""), state=tk.DISABLED)
def _ask_spin_abundance(self, element):
while True:
sp = EntryPopup(self, "Input nuclear spin for element {}:".format(element))
sp.root.wait_window()
spin = sp.value
try:
spin = float(spin)
if spin % 0.5 == 0 and spin >= 0:
break
except:
pass
msgbox.showerror("Error", "Spin must be positive half-integer")
while True:
sp = EntryPopup(self, "Input abundance (0-1) for element {}:".format(element))
sp.root.wait_window()
abundance = sp.value
try:
abundance = float(abundance)
                if 0 <= abundance <= 1:
break
except:
pass
msgbox.showerror("Error", "Abundance must be between 0 and 1")
def update(self, *args):
xs, ys = self.nmr.get_plot()
min_x = float(self.nmr.min_x.get())
max_x = float(self.nmr.max_x.get())
self.ax.clear()
self.ax.plot(xs, ys)
self.ax.set_xlim(min_x, max_x)
self.ax.set_xlabel("Chemical Shift (ppm)")
self.ax.yaxis.set_visible(False)
self.ax.invert_xaxis()
self.figure.tight_layout()
self.canvases["plot"].draw()
def parse(self, *args):
fn = askopenfilename(filetypes = (("Gaussian Log File", "*.log"), ("All Files", "*.*")))
self.nmr_parser.parse(fn)
gp = LoadGaussianPopup(self, self.nmr_parser)
gp.root.wait_window()
shifts = []
try:
gaussian_nmr_list = self.gaussian_nmr_list
self.nmr.peaks = []
self.peaks = []
self.splittings = []
for shift, splittings in gaussian_nmr_list:
shifts.append(shift)
peak = Peak(self.nmr, 1, shift)
self.peaks.append(peak)
self.nmr.peaks.append(peak)
for coupling, spin, degeneracy in splittings:
splitting = Splitting(spin, degeneracy, coupling, 1)
peak.splittings.append(splitting)
for splitting in peak.splittings:
self.splittings.append(splitting)
self._set_peak(peak)
self._set_splitting(splitting)
min_x = min(shifts)
max_x = max(shifts)
diff = max_x - min_x
self.nmr.min_x.set(round(min_x - 0.2 * diff) - 1)
self.nmr.max_x.set(round(max_x + 0.2 * diff) + 1)
self.update()
except:
msgbox.showerror("Error", "Could not load Gaussian .log File")
raise
def export(self, *args):
try:
line = self.ax.lines[0]
except IndexError:
msgbox.showerror("No Data", "No data to export!")
return
data = line.get_xydata()
fn = asksaveasfilename(filetypes = [("CSV Files", "*.csv")])
with open(fn, "w") as f:
for row in data:
f.write("{},{}\n".format(*row))
class NMR():
def __init__(self):
self.machine_frequency_mhz = tk.StringVar(value='400')
self.peaks = []
self.resolution = tk.StringVar(value='0.01')
self._epsilon = tk.StringVar(value='1e-6')
self.noise = tk.StringVar(value='0.1')
self.min_x = tk.StringVar(value='0')
self.max_x = tk.StringVar(value='12')
def get_plot(self):
min_x = float(self.min_x.get())
max_x = float(self.max_x.get())
res = float(self.resolution.get())
noise = float(self.noise.get())
xs = np.arange(min_x, max_x + res, res)
ys = np.random.random(len(xs)) * noise
for i, peak in enumerate(self.peaks):
p_xs, p_ys = peak.generate(min_x, max_x)
p_y_ints = interpolate.griddata(p_xs, p_ys, xs, method='linear')
ys += p_y_ints
return xs, ys
def __repr__(self):
return "NMR(freq={}, resolution={}, noise={}, min_x={}, max_x={}".format(
self.machine_frequency_mhz.get(),
self.resolution.get(),
self.noise.get(),
self.min_x.get(),
self.max_x.get()
)
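# Usage sketch (an added illustration, not part of the original script):
# generating a spectrum without showing the GUI. Every parameter is a
# tk.StringVar, so a Tk root must exist first.
#
#   root = tk.Tk()
#   nmr = NMR()
#   nmr.peaks.append(Peak(nmr, nuclei=1, shift=7.26))
#   xs, ys = nmr.get_plot()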
class Peak():
def __init__(self, parent, nuclei, shift, hwhm=0.01):
self.nuclei = tk.StringVar(value=nuclei)
self.shift = tk.StringVar(value=shift)
self.hwhm = tk.StringVar(value=hwhm)
self.parent = parent
self.splittings = []
def cauchy(self, min_x, max_x):
res = float(self.parent.resolution.get())
hwhm = float(self.hwhm.get())
mf = float(self.parent.machine_frequency_mhz.get())
nuclei = float(self.nuclei.get())
shift = float(self.shift.get())
#Extend x domain to include off-chart contributions to splitting + FWHM
max_split = 2 * hwhm
        for S in self.splittings:
            # use a distinct name so the peak's own 'nuclei' (the Cauchy
            # amplitude below) is not clobbered by the splitting's count
            s_nuclei = int(S.nuclei.get())
            spin = float(S.spin.get())
            coupling = float(S.coupling.get())
            max_split += (coupling * (s_nuclei * spin + 1) / mf)
min_x -= (round(max_split / res)) * res
max_x += (round(max_split / res)) * res
xs = np.arange(min_x, max_x + res, res)
ys = []
for x in xs:
ys.append((nuclei / (np.pi * hwhm * (1 + ((x - shift) / hwhm) ** 2))))
return xs, ys
def generate(self, min_x, max_x):
res = float(self.parent.resolution.get())
epsilon = float(self.parent._epsilon.get())
mf = float(self.parent.machine_frequency_mhz.get())
xs, ys = self.cauchy(min_x, max_x)
if len(xs) == 0:
return [], []
for S in self.splittings:
nuclei = int(S.nuclei.get())
spin = float(S.spin.get())
coupling = float(S.coupling.get())
s = list(S.get_splitting())
j_split = float(coupling) / mf
max_j = (nuclei * spin) * j_split
conv_xs = np.arange(- max_j, max_j + res, res)
conv_ys = []
j = - max_j
for i, conv_x in enumerate(conv_xs):
if j - conv_x <= epsilon:
conv_ys.append(s.pop(0))
j += j_split * 0.5
else:
conv_ys.append(0.0)
ys = ndi.convolve1d(ys, conv_ys)
return xs, np.array(ys)
def __repr__(self):
return "Peak(nuclei={}, shift={:.3f}, hwhm={:.3f})".format(int(self.nuclei.get()), float(self.shift.get()), float(self.hwhm.get()))
class Splitting():
def __init__(self, spin, nuclei, coupling, abundance):
self.spin = tk.StringVar(value=spin)
self.nuclei = tk.StringVar(value=nuclei)
self.coupling = tk.StringVar(value=coupling)
self.abundance = tk.StringVar(value=abundance)
def get_splitting(self):
abundance = float(self.abundance.get())
row = self.pascal()
norm = sum(row)
row *= abundance / norm
        mid = (len(row) - 1) // 2  # integer index; row length is always odd
row[mid] += 1 - abundance
return row
def pascal(self):
spin = float(self.spin.get())
nuclei = int(self.nuclei.get())
if not spin % 0.5 == 0:
raise ValueError("Spin must be divisible by 0.5")
#Number of elements
n = int(4 * spin * nuclei + 1)
prev_row = [1 if i == 2 * spin * nuclei else 0 for i in range(n)]
for nucleus in range(nuclei):
row = []
            for i in range(n):
v = 0
for p_i, p_element in enumerate(prev_row):
if abs(p_i - i) <= 2 * spin and (p_i - i) % 2 == 2 * spin % 2:
v += p_element
row.append(float(v))
prev_row = row
return np.array(row)
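    # Worked example (added for clarity): for two equivalent spin-1/2 nuclei,
    # n = 4*0.5*2 + 1 = 5 and the generalised triangle above gives
    # [1, 0, 2, 0, 1] -- the familiar 1:2:1 triplet, with zeros marking the
    # half-step positions used by Peak.generate's convolution grid.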
def __repr__(self):
return "Splitting(spin={:.1f}, nuclei={}, coupling={:.3f}, abundance={:.3%})".format(
float(self.spin.get()),
int(self.nuclei.get()),
float(self.coupling.get()),
float(self.abundance.get())
)
def __eq__(self, other):
if isinstance(other, Splitting):
for a in ["spin", "nuclei", "coupling", "abundance"]:
if getattr(self, a).get() != getattr(other, a).get():
return False
return True
else:
return False
class NMRParser():
def __init__(self):
self.peak_dict = {}
self.coupling_degeneracy_threshold = 1
self.spin_dict = {
"H" : [0.5, 1],
"C" : [0.5, 0.011],
"N" : [0.5, 0.00365],
"O" : [0, 0],
"S" : [1.5, 0.0076],
"Si": [0.5, 0.047]
}
def parse(self, fn):
with open(fn, "r") as f:
lines = f.readlines()
ln = 0
n_ln = len(lines)
n_ats = 0
elements = []
shifts = []
spins = []
while ln < n_ln:
line = lines[ln]
if n_ats == 0:
if line.strip() in ["Input orientation:", "Standard orientation:"]:
ln += 5
while not lines[ln].strip().startswith('----'):
n_ats += 1
ln += 1
elif line.strip() == "SCF GIAO Magnetic shielding tensor (ppm):":
at = 0
while at < n_ats:
s_line = lines[ln].split()
skip = False
try:
at = int(s_line[0])
shifts.append(float(s_line[4]))
element = s_line[1]
elements.append(element)
except:
skip = True
if not skip:
try:
spin = self.spin_dict[element][0]
except:
                            spin, abundance = self._ask_spin_abundance(element)
                            self.spin_dict[element] = [spin, abundance]
spins.append(spin)
ln += 1
elif line.strip() == "Total nuclear spin-spin coupling J (Hz):":
ln += 2
j_table = np.zeros((n_ats, n_ats))
i = j = at = 0
init_j = 0
while i < n_ats and j < n_ats:
at = 0
while at < n_ats:
j = init_j
s_line = lines[ln].split()
at += 1
try:
i = int(s_line[0]) - 1
except ValueError:
break
for j_el in s_line[1:]:
coupling = float(j_el.replace("D", "E"))
j_table[i][j] = j_table[j][i] = abs(coupling)
j += 1
if i + 1 >= n_ats:
ln += 1
break
ln += 1
ln += 1
try:
init_j = int(lines[ln].split()[0]) - 1
except ValueError:
break
ln += 1
for at in range(n_ats):
pd = {}
pd["Element"] = elements[at]
pd["Shift" ] = shifts[at]
try:
j_list = []
for j, el in enumerate(j_table[at]):
j_list.append([el, spins[j], elements[j], 1])
j_list = sorted(j_list, key = lambda x: x[0])
pd["J Coupling"] = j_list
except NameError:
pd["J Coupling"] = {}
self.peak_dict[at] = pd
def set_j_degeneracy(self):
for at, pd in self.peak_dict.items():
j_list = pd["J Coupling"]
degeneracy_j_list = []
for c, s, e, d in j_list:
if c > self.coupling_degeneracy_threshold:
skip = False
for i, (dc, ds, de, dd) in enumerate(degeneracy_j_list):
if abs(c - np.average(dc)) <= self.coupling_degeneracy_threshold and e == de:
degeneracy_j_list[i][0].append(c)
degeneracy_j_list[i][3] += 1
skip = True
break
if not skip:
degeneracy_j_list.append([[c], s, e, 1])
degeneracy_j_list = [[np.average(dc), ds, de, dd] for dc, ds, de, dd in degeneracy_j_list]
self.peak_dict[at]["J Coupling"] = degeneracy_j_list
def _ask_spin_abundance(self, element):
try:
input = raw_input
except NameError:
pass
while True:
spin = input("Input nuclear spin for element {}:".format(element))
try:
spin = float(spin)
if spin % 0.5 == 0 and spin >= 0:
break
except:
pass
print("Spin must be positive half-integer")
while True:
abundance = input("Input abundance (0-1) for element {}:".format(element))
try:
abundance = float(abundance)
if 0 <= abundance <= 1:
break
except:
pass
print("Abundance must be between 0 and 1")
return [spin, abundance]
class EntryPopup(object):
def __init__(self, parent, text):
self.root = tk.Toplevel(parent.root)
self.parent = parent
self.value = ""
self.label = _add_label(self.root, {"text": text}, gk('00'))
self.entry = _add_entry(self.root, "", {}, gk('01'))
self.ok_button = _add_button(self.root, {"text": "OK" }, gk('10'), {"<Button-1>": self._ok})
self.cancel_button = _add_button(self.root, {"text": "Cancel"}, gk('11'), {"<Button-1>": self._cancel})
self.root.protocol("WM_DELETE_WINDOW", self._cl)
def _cl(self, *args):
self.root.destroy()
def _ok(self, *args):
self.value = self.entry.get()
self.root.destroy()
def _cancel(self, *args):
self.root.destroy()
class LoadGaussianPopup(object):
def __init__(self, parent, parser):
self.root = tk.Toplevel(parent.root)
self.parent = parent
self.parser = parser
ell = []
for at, pd in parser.peak_dict.items():
element = pd['Element']
if element not in ell:
ell.append(element)
self.element_label = _add_label(self.root, {"text": "Select Element:"}, gk('00w'))
self.elements_om, self.element = _add_optionmenu(self.root, 'H' if 'H' in ell else ell[0], ell, {}, gk('01ew'))
self.reference_label = _add_label(self.root, {"text": "Reference Shift (ppm):"}, gk('10w'))
self.reference_entry = _add_entry(self.root, "", {}, gk('11w'))
self.degeneracy_label = _add_label(self.root, {"text": "Degeneracy Threshold (Hz):"}, gk('20w'))
self.degeneracy_entry = _add_entry(self.root, "1", {}, gk('21w'))
self.decouple_label = _add_label(self.root, {"text": "Decouple Elements?"}, gk('30w'))
self.decouple = tk.BooleanVar(value=True)
_add_checkbutton(self.root, True, {}, gk('31'), variable=self.decouple)
self.go_button = _add_button(self.root, {"text": "Go"}, gk('40ew'), {"<Button-1>": self.go})
self.root.protocol("WM_DELETE_WINDOW", self._cl)
def _cl(self, *args):
self.root.destroy()
def _get_ref(self):
try:
reference = float(self.reference_entry.get())
if reference > 0:
return reference
except:
pass
msgbox.showerror("Error", "Reference Shift must be a positive float")
return None
def _get_degeneracy(self):
try:
degeneracy = float(self.degeneracy_entry.get())
if degeneracy > 0:
return degeneracy
except:
pass
msgbox.showerror("Error", "Degeneracy Threshold must be a positive float")
return None
def go(self, *args):
reference = self._get_ref()
if reference is None:
return
degeneracy_threshold = self._get_degeneracy()
if degeneracy_threshold is None:
return
chosen_element = self.element.get()
decouple = self.decouple.get()
self.parser.coupling_degeneracy_threshold = degeneracy_threshold
self.parser.set_j_degeneracy()
nmr_list = []
for at, pd in self.parser.peak_dict.items():
j_list= pd['J Coupling']
shift = pd['Shift']
element = pd['Element']
if element == chosen_element:
nmr_list.append([reference - shift, [[c, s, d] for c, s, e, d in j_list if not decouple or e == element]])
self.parent.gaussian_nmr_list = nmr_list
self.root.destroy()
def gk(string):
grid = "".join([s for s in string if s.isdigit()])
sticky = "".join([s for s in string if s in "news"])
grid = grid.ljust(6, '0')
r,c,rs,cs,px,py = [int(s) for s in grid]
g = {"row": r, "column": c}
if rs: g["rowspan"] = rs
if cs: g["columnspan"] = cs
if px: g["padx"] = px
if py: g["pady"] = px
if sticky: g["sticky"] = sticky
return g
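# Illustration (added): gk packs grid() kwargs into a compact string of
# digits -- row, column, rowspan, columnspan, padx, pady -- plus optional
# sticky letters.
#
#   gk('010300')     -> {'row': 0, 'column': 1, 'columnspan': 3}
#   gk('000055news') -> {'row': 0, 'column': 0, 'padx': 5, 'pady': 5,
#                        'sticky': 'news'}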
def _add_frame(frame_kwargs={}, grid_kwargs={}):
"""Insert a frame (box) into parent.
With text, a labelled frame is used"""
if "text" in frame_kwargs:
frame = tk.LabelFrame(**frame_kwargs)
else:
frame = tk.Frame(**frame_kwargs)
frame.grid(**grid_kwargs)
return frame
def _add_label(frame, text_kwargs={}, grid_kwargs={}, config_kwargs={}):
"""Insert a label"""
label = tk.Label(frame, **text_kwargs)
label.grid(**grid_kwargs)
label.config(**config_kwargs)
return label
def _add_scale(frame, val, scale_kwargs={}, grid_kwargs={}, config_kwargs={}):
"""Insert a scrollable bar"""
variable = tk.StringVar()
variable.set(val)
scale = tk.Scale(frame, **scale_kwargs)
scale.set(variable.get())
scale.grid(**grid_kwargs)
scale.config(**config_kwargs)
scale.grid_columnconfigure(0, weight = 1)
return scale
def _add_button(frame, button_kwargs={}, grid_kwargs={}, bind_kwargs={}, config_kwargs={}):
"Insert a button"""
button = tk.Button(frame, **button_kwargs)
button.grid(**grid_kwargs)
for k, v in bind_kwargs.items():
button.bind(k, v)
button.config(bg = "blue", **config_kwargs)
return button
def _add_entry(frame, val, entry_kwargs={}, grid_kwargs={}, config_kwargs={}, attach_func=None):
"""Add a text entry"""
variable = tk.StringVar()
variable.set(val)
entry = tk.Entry(frame, textvariable=variable, **entry_kwargs)
entry.bind("<FocusOut>", attach_func)
entry.grid(**grid_kwargs)
entry.config(**config_kwargs)
return entry
def _add_optionmenu(frame, val, items, optionmenu_kwargs={}, grid_kwargs={}, config_kwargs={}):
"""Add a dropdown menu"""
variable = tk.StringVar()
variable.set(val)
optionmenu = tk.OptionMenu(frame, variable, *items, **optionmenu_kwargs)
optionmenu.grid(**grid_kwargs)
optionmenu.config(**config_kwargs)
return optionmenu, variable
def _add_radio(frame, val, radio_kwargs={}, grid_kwargs={}, config_kwargs={}, variable=None):
"""Add a radio button"""
if variable is None:
variable = tk.StringVar()
variable.set(val)
radio = tk.Radiobutton(frame, variable=variable, **radio_kwargs)
radio.grid(**grid_kwargs)
    radio.config(**config_kwargs)
    return radio
def _add_checkbutton(frame, val, checkbutton_kwargs={}, grid_kwargs={}, config_kwargs={}, variable=None):
"""Add a radio button"""
if variable is None:
variable = tk.BooleanVar()
variable.set(val)
checkbutton = tk.Checkbutton(frame, variable=variable, **checkbutton_kwargs)
checkbutton.grid(**grid_kwargs)
checkbutton.config(**config_kwargs)
return checkbutton
def _add_mpl_canvas(frame, figure, grid_kwargs={}):
canvas = FigureCanvasTkAgg(figure, frame)
canvas.show()
widget = canvas.get_tk_widget()
widget.grid(**grid_kwargs)
return canvas
if __name__ == "__main__":
gui = GUI()
|
tam10/nmr
|
nmr.py
|
Python
|
gpl-3.0
| 33,679
|
[
"Gaussian"
] |
59a2b37fec4fb51b3930923d3c89918549420bc810356b9ded2d606506e9afa0
|
# -*- coding: utf-8 -*-
import hashlib
import json
from collections import OrderedDict as _OrderedDict
from collections.abc import Mapping as _Mapping
from mpcontribs.io.core import replacements, mp_level01_titles
# try:
# from propnet.core.quantity import Quantity
# except ImportError:
# Quantity = None
class RecursiveDict(_OrderedDict):
"""extension of dict for internal representation of MPFile"""
def rec_update(self, other=None, overwrite=False, replace_newlines=True):
"""https://gist.github.com/Xjs/114831"""
# overwrite=False: don't overwrite existing unnested key
if other is None: # mode to force RecursiveDicts to be used
other = self
overwrite = True
for key, value in other.items():
if isinstance(key, str):
key = "".join([replacements.get(c, c) for c in key])
if key in self and isinstance(self[key], dict) and isinstance(value, dict):
# ensure RecursiveDict and update key (w/o underscores)
self[key] = RecursiveDict(self[key])
replace_newlines = bool(key != mp_level01_titles[3])
self[key].rec_update(
other=value, overwrite=overwrite, replace_newlines=replace_newlines
)
elif (key in self and overwrite) or key not in self:
if isinstance(value, str) and replace_newlines:
self[key] = value.replace("\n", " ")
else:
self[key] = value
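    # Sketch (added illustration): unlike dict.update, rec_update merges
    # nested mappings instead of replacing them wholesale.
    #
    #   d = RecursiveDict({'a': {'x': 1}})
    #   d.rec_update({'a': {'y': 2}})
    #   # d['a'] -> {'x': 1, 'y': 2}; a plain dict.update would drop 'x'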
def iterate(self, nested_dict=None):
"""http://stackoverflow.com/questions/10756427/loop-through-all-nested-dictionary-values"""
d = self if nested_dict is None else nested_dict
if nested_dict is None:
self.level = 0
for key in list(d.keys()):
value = d[key]
            if isinstance(value, _Mapping):
                if value.get("@class") == "Structure":
                    from pymatgen.core import Structure
                    yield key, Structure.from_dict(value)
                    continue
                if value.get("@class") == "Table":
                    from mpcontribs.io.core.components.tdata import Table
                    yield key, Table.from_dict(value)
                    continue
                # if Quantity is not None and value.get('@class') == 'Quantity':
                #     quantity = Quantity.from_dict(value)
                #     yield key, quantity
                #     continue
                if "display" in value and "value" in value:  # 'unit' is optional
                    yield (self.level, key), value["display"]
                    continue
                # plain section node: emit a header marker, then recurse
                yield (self.level, key), None
                self.level += 1
                for inner_key, inner_value in self.iterate(nested_dict=value):
                    yield inner_key, inner_value
                self.level -= 1
            else:
                yield (self.level, key), value
# insertion mechanism from https://gist.github.com/jaredks/6276032
def __insertion(self, link_prev, key_value):
key, value = key_value
if link_prev[2] != key:
if key in self:
del self[key]
link_next = link_prev[1]
self._OrderedDict__map[key] = link_prev[1] = link_next[0] = [
link_prev,
link_next,
key,
]
dict.__setitem__(self, key, value)
def insert_after(self, existing_key, key_value):
self.__insertion(self._OrderedDict__map[existing_key], key_value)
def insert_before(self, existing_key, key_value):
self.__insertion(self._OrderedDict__map[existing_key][0], key_value)
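    # Sketch (added; relies on the private _OrderedDict__map attribute, so it
    # assumes a pure-Python OrderedDict implementation):
    #
    #   d = RecursiveDict([('a', 1), ('c', 3)])
    #   d.insert_after('a', ('b', 2))
    #   # list(d) -> ['a', 'b', 'c']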
def insert_default_plot_options(self, pd_obj, k, update_plot_options=None):
# make default plot (add entry in 'plots') for each
# table, first column as x-column
table_name = "".join([replacements.get(c, c) for c in k])
key = "default_{}".format(table_name)
plots_dict = _OrderedDict(
[
(
mp_level01_titles[2],
_OrderedDict(
[
(
key,
_OrderedDict(
[("x", pd_obj.columns[0]), ("table", table_name)]
),
)
]
),
)
]
)
if update_plot_options is not None:
plots_dict[mp_level01_titles[2]][key].update(update_plot_options)
if mp_level01_titles[2] in self:
self.rec_update(plots_dict)
else:
self[mp_level01_titles[2]] = plots_dict[mp_level01_titles[2]]
def render(self):
"""use JsonHuman library to render a dictionary"""
jdata = json.dumps(self).replace("\\n", " ")
m = hashlib.md5()
m.update(jdata.encode("utf-8"))
divid = m.hexdigest()
html = f'<div id="{divid}" style="width:100%;"></div><script>'
html += f'render_json({{divid: "{divid}", data: {jdata}}});</script>'
return html
def _ipython_display_(self):
# TODO jupyterlab mimetype extension
from IPython.display import display_html
display_html(self.render(), raw=True)
|
materialsproject/MPContribs
|
mpcontribs-io/mpcontribs/io/core/recdict.py
|
Python
|
mit
| 5,442
|
[
"pymatgen"
] |
2486d50cfb9c72f7b3547966d00fcc32951b363ee42d60ff045232243543ccf4
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import espresso
from espresso import Real3D
def writeTabFile(pot, name, N, low=0.0, high=2.5, body=2):
"""
writeTabFile can be used to create a table for any potential
Parameters are:
    * pot  : this is any espresso.interaction potential
    * name : filename
    * N    : number of lines to write
    * low  : lowest r (default is 0.0)
    * high : highest r (default is 2.5)
    * body : 2-, 3- or 4-body interaction (default is 2)
    This function has not been tested for 3- and 4-body interactions
"""
outfile = open(name, "w")
delta = (high - low) / (N - 1)
for i in range(N):
r = low + i * delta
energy = pot.computeEnergy(r)
        if body == 2:  # this is for 2-body potentials
            force = pot.computeForce(Real3D(r, 0.0, 0.0))[0]
            #force /= r
        else:  # this is for 3- and 4-body potentials
            force = pot.computeForce(r)
outfile.write("%15.8g %15.8g %15.8g\n"%(r, energy, force))
outfile.close()
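# Usage sketch (hedged -- the exact constructor signature may differ between
# ESPResSo++ versions): tabulating a Lennard-Jones potential. 'low' is kept
# above zero to avoid the r=0 singularity.
#
#   pot = espresso.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=2.5)
#   writeTabFile(pot, 'lj.tab', N=512, low=0.01, high=2.5)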
|
BackupTheBerlios/espressopp
|
src/tools/tabulated.py
|
Python
|
gpl-3.0
| 1,832
|
[
"ESPResSo"
] |
46d7d3132c8f1aa6cf426c0474f311d2d5903cf52c9083db4afac37474e577d3
|
"""
Test the about xblock
"""
import datetime
import pytz
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from nose.plugins.attrib import attr
from course_modes.models import CourseMode
from lms.djangoapps.ccx.tests.factories import CcxFactory
from shoppingcart.models import Order, PaidCourseRegistration
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory, CourseEnrollmentAllowedFactory, UserFactory
from track.tests import EventTrackingTestCase
from util.milestones_helpers import get_prerequisite_courses_display, set_prerequisite_courses
from xmodule.course_module import CATALOG_VISIBILITY_ABOUT, CATALOG_VISIBILITY_NONE
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MIXED_MODULESTORE,
TEST_DATA_SPLIT_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.utils import TEST_DATA_DIR
from xmodule.modulestore.xml_importer import import_course_from_xml
from .helpers import LoginEnrollmentTestCase
# HTML for registration button
REG_STR = "<form id=\"class_enroll_form\" method=\"post\" data-remote=\"true\" action=\"/change_enrollment\">"
SHIB_ERROR_STR = "The currently logged-in user account does not have permission to enroll in this course."
@attr(shard=1)
class AboutTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase, EventTrackingTestCase, MilestonesTestCaseMixin):
"""
Tests about xblock.
"""
@classmethod
def setUpClass(cls):
super(AboutTestCase, cls).setUpClass()
cls.course = CourseFactory.create()
cls.course_without_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_NONE)
cls.course_with_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_ABOUT)
cls.purchase_course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
cls.about = ItemFactory.create(
category="about", parent_location=cls.course_without_about.location,
data="WITHOUT ABOUT", display_name="overview"
)
cls.about = ItemFactory.create(
category="about", parent_location=cls.course_with_about.location,
data="WITH ABOUT", display_name="overview"
)
def setUp(self):
super(AboutTestCase, self).setUp()
self.course_mode = CourseMode(
course_id=self.purchase_course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10
)
self.course_mode.save()
def test_anonymous_user(self):
"""
This test asserts that a non-logged in user can visit the course about page
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
# Check that registration button is present
self.assertIn(REG_STR, resp.content)
def test_logged_in(self):
"""
This test asserts that a logged-in user can visit the course about page
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_already_enrolled(self):
"""
Asserts that the end user sees the appropriate messaging
        when visiting the course about page while already enrolled
"""
self.setup_user()
self.enroll(self.course, True)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("You are enrolled in this course", resp.content)
self.assertIn("View Course", resp.content)
@override_settings(COURSE_ABOUT_VISIBILITY_PERMISSION="see_about_page")
def test_visible_about_page_settings(self):
"""
Verify that the About Page honors the permission settings in the course module
"""
url = reverse('about_course', args=[self.course_with_about.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("WITH ABOUT", resp.content)
url = reverse('about_course', args=[self.course_without_about.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 404)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_logged_in_marketing(self):
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
# should be redirected
self.assertEqual(resp.status_code, 302)
# follow this time, and check we're redirected to the course info page
resp = self.client.get(url, follow=True)
target_url = resp.redirect_chain[-1][0]
info_url = reverse('info', args=[self.course.id.to_deprecated_string()])
self.assertTrue(target_url.endswith(info_url))
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_pre_requisite_course(self):
pre_requisite_course = CourseFactory.create(org='edX', course='900', display_name='pre requisite course')
course = CourseFactory.create(pre_requisite_courses=[unicode(pre_requisite_course.id)])
self.setup_user()
url = reverse('about_course', args=[unicode(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[unicode(pre_requisite_courses[0]['key'])])
self.assertIn("<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
resp.content.strip('\n'))
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_about_page_unfulfilled_prereqs(self):
pre_requisite_course = CourseFactory.create(
org='edX',
course='901',
display_name='pre requisite course',
)
pre_requisite_courses = [unicode(pre_requisite_course.id)]
# for this failure to occur, the enrollment window needs to be in the past
course = CourseFactory.create(
org='edX',
course='1000',
# closed enrollment
enrollment_start=datetime.datetime(2013, 1, 1),
enrollment_end=datetime.datetime(2014, 1, 1),
start=datetime.datetime(2013, 1, 1),
end=datetime.datetime(2030, 1, 1),
pre_requisite_courses=pre_requisite_courses,
)
set_prerequisite_courses(course.id, pre_requisite_courses)
self.setup_user()
self.enroll(self.course, True)
self.enroll(pre_requisite_course, True)
url = reverse('about_course', args=[unicode(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[unicode(pre_requisite_courses[0]['key'])])
self.assertIn("<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
resp.content.strip('\n'))
url = reverse('about_course', args=[unicode(pre_requisite_course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
@attr(shard=1)
class AboutTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the course about page
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Set up the tests
"""
super(AboutTestCaseXML, self).setUp()
# The following test course (which lives at common/test/data/2014)
# is closed; we're testing that an about page still appears when
# the course is already closed
self.xml_course_id = self.store.make_course_key('edX', 'detached_pages', '2014')
import_course_from_xml(
self.store,
'test_user',
TEST_DATA_DIR,
source_dirs=['2014'],
static_content_store=None,
target_id=self.xml_course_id,
raise_on_failure=True,
create_if_not_present=True,
)
# this text appears in that course's about page
# common/test/data/2014/about/overview.html
self.xml_data = "about page 463139"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@attr(shard=1)
class AboutWithCappedEnrollmentsTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test case will check the About page when a course has a capped enrollment
"""
@classmethod
def setUpClass(cls):
super(AboutWithCappedEnrollmentsTestCase, cls).setUpClass()
cls.course = CourseFactory.create(metadata={"max_student_enrollments_allowed": 1})
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def test_enrollment_cap(self):
"""
This test will make sure that enrollment caps are enforced
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('<a href="#" class="register">', resp.content)
self.enroll(self.course, verify=True)
# create a new account since the first account is already enrolled in the course
self.email = 'foo_second@test.com'
self.password = 'bar'
self.username = 'test_second'
self.create_account(self.username, self.email, self.password)
self.activate_user(self.email)
self.login(self.email, self.password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Course is full", resp.content)
# Try to enroll as well
result = self.enroll(self.course)
self.assertFalse(result)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
@attr(shard=1)
class AboutWithInvitationOnly(SharedModuleStoreTestCase):
"""
This test case will check the About page when a course is invitation only.
"""
@classmethod
def setUpClass(cls):
super(AboutWithInvitationOnly, cls).setUpClass()
cls.course = CourseFactory.create(metadata={"invitation_only": True})
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
display_name="overview"
)
def test_invitation_only(self):
"""
Test for user not logged in, invitation only course.
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment in this course is by invitation only", resp.content)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
def test_invitation_only_but_allowed(self):
"""
Test for user logged in and allowed to enroll in invitation only course.
"""
# Course is invitation only, student is allowed to enroll and logged in
user = UserFactory.create(username='allowed_student', password='test', email='allowed_student@test.com')
CourseEnrollmentAllowedFactory(email=user.email, course_id=self.course.id)
self.client.login(username=user.username, password='test')
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
# Check that registration button is present
self.assertIn(REG_STR, resp.content)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'RESTRICT_ENROLL_BY_REG_METHOD': True})
class AboutTestCaseShibCourse(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
Test cases covering about page behavior for courses that use shib enrollment domain ("shib courses")
"""
@classmethod
def setUpClass(cls):
super(AboutTestCaseShibCourse, cls).setUpClass()
cls.course = CourseFactory.create(enrollment_domain="shib:https://idp.stanford.edu/")
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def test_logged_in_shib_course(self):
"""
        For shib courses, logged-in users will see the enroll button, but are rejected once they click it
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
self.assertIn(SHIB_ERROR_STR, resp.content)
self.assertIn(REG_STR, resp.content)
def test_anonymous_user_shib_course(self):
"""
For shib courses, anonymous users will also see the enroll button
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
self.assertIn(SHIB_ERROR_STR, resp.content)
self.assertIn(REG_STR, resp.content)
@attr(shard=1)
class AboutWithClosedEnrollment(ModuleStoreTestCase):
"""
This test case will check the About page for a course that has enrollment start/end
set but it is currently outside of that period.
"""
def setUp(self):
super(AboutWithClosedEnrollment, self).setUp()
self.course = CourseFactory.create(metadata={"invitation_only": False})
# Setup enrollment period to be in future
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
self.course.enrollment_start = tomorrow
self.course.enrollment_end = nextday
self.course = self.update_course(self.course, self.user.id)
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
display_name="overview"
)
    def test_closed_enrollment(self):
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment is Closed", resp.content)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
    def test_course_price_is_not_visible_in_sidebar(self):
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
        # course price is not visible in the course_about page when the course
        # mode is not set to honor
self.assertNotIn('<span class="important-dates-item-text">$10</span>', resp.content)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True})
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
class AboutPurchaseCourseTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test class runs through a suite of verifications regarding
purchaseable courses
"""
@classmethod
def setUpClass(cls):
super(AboutPurchaseCourseTestCase, cls).setUpClass()
cls.course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
cls.closed_course = CourseFactory.create(
org='MITx',
number='closed',
display_name='Closed Course To Buy',
enrollment_start=tomorrow,
enrollment_end=nextday
)
def setUp(self):
super(AboutPurchaseCourseTestCase, self).setUp()
self._set_ecomm(self.course)
self._set_ecomm(self.closed_course)
def _set_ecomm(self, course):
"""
Helper method to turn on ecommerce on the course
"""
course_mode = CourseMode(
course_id=course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10,
)
course_mode.save()
def test_anonymous_user(self):
"""
Make sure an anonymous user sees the purchase button
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_logged_in(self):
"""
Make sure a logged in user sees the purchase button
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_already_in_cart(self):
"""
This makes sure if a user has this course in the cart, that the expected message
appears
"""
self.setup_user()
cart = Order.get_cart_for_user(self.user)
PaidCourseRegistration.add_to_order(cart, self.course.id)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("This course is in your", resp.content)
self.assertNotIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_already_enrolled(self):
"""
This makes sure that the already enrolled message appears for paywalled courses
"""
self.setup_user()
# note that we can't call self.enroll here since that goes through
# the Django student views, which doesn't allow for enrollments
# for paywalled courses
CourseEnrollment.enroll(self.user, self.course.id)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("You are enrolled in this course", resp.content)
self.assertIn("View Course", resp.content)
self.assertNotIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_closed_enrollment(self):
"""
This makes sure that paywalled courses also honor the registration
window
"""
self.setup_user()
url = reverse('about_course', args=[self.closed_course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment is Closed", resp.content)
self.assertNotIn("Add closed to Cart <span>($10 USD)</span>", resp.content)
        # course price is visible in the course_about page when the course
        # mode is set to honor and its price is set
self.assertIn('<span class="important-dates-item-text">$10</span>', resp.content)
def test_invitation_only(self):
"""
        This makes sure that the invitation-only restriction takes precedence over
any purchase enablements
"""
course = CourseFactory.create(metadata={"invitation_only": True})
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment in this course is by invitation only", resp.content)
def test_enrollment_cap(self):
"""
Make sure that capped enrollments work even with
paywalled courses
"""
course = CourseFactory.create(
metadata={
"max_student_enrollments_allowed": 1,
"display_coursenumber": "buyme",
}
)
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
# note that we can't call self.enroll here since that goes through
# the Django student views, which doesn't allow for enrollments
# for paywalled courses
CourseEnrollment.enroll(self.user, course.id)
# create a new account since the first account is already enrolled in the course
email = 'foo_second@test.com'
password = 'bar'
username = 'test_second'
        self.create_account(username, email, password)
self.activate_user(email)
self.login(email, password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Course is full", resp.content)
self.assertNotIn("Add buyme to Cart ($10)", resp.content)
def test_free_course_display(self):
"""
Make sure other courses that don't have shopping cart enabled don't display the add-to-cart button
and don't display the course_price field if Cosmetic Price is disabled.
"""
course = CourseFactory.create(org='MITx', number='free', display_name='Course For Free')
self.setup_user()
url = reverse('about_course', args=[course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn("Add free to Cart (Free)", resp.content)
self.assertNotIn('<p class="important-dates-item-title">Price</p>', resp.content)
class CourseAboutTestCaseCCX(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test that an unenrolled student cannot access a CCX.
    Note: Only a CCX coach can enroll a student in a CCX; self-registration is not allowed.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(CourseAboutTestCaseCCX, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(CourseAboutTestCaseCCX, self).setUp()
# Create ccx coach account
self.coach = coach = AdminFactory.create(password="test")
self.client.login(username=coach.username, password="test")
def test_redirect_to_dashboard_unenrolled_ccx(self):
"""
        Assert that an unenrolled user who tries to access a CCX is not allowed to self-register
        and is redirected to the student dashboard.
"""
# create ccx
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
ccx_locator = CCXLocator.from_course_locator(self.course.id, unicode(ccx.id))
self.setup_user()
url = reverse('info', args=[ccx_locator])
response = self.client.get(url)
expected = reverse('dashboard')
self.assertRedirects(response, expected, status_code=302, target_status_code=200)
|
pepeportela/edx-platform
|
lms/djangoapps/courseware/tests/test_about.py
|
Python
|
agpl-3.0
| 25,870
|
[
"VisIt"
] |
a3bb5cca93ca9287eee9f7852c2f3877de0950eb92fd625e2840cf4f1ce69a5e
|
# Copyright 2019 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A demo data set constructed with MediaSequence and MediaPipe.
This code demonstrates the steps for constructing a data set with MediaSequence.
This code has two functions. First, it can be run as a module to download and
prepare a toy dataset. Second, it can be imported and used to provide a
tf.data.Dataset reading that data from disk via as_dataset().
Running as a module prepares the data in three stages via generate_examples().
First, the actual data files are downloaded. If the download is disrupted, the
incomplete files will need to be removed before running the script again.
Second, the annotations are parsed and reformatted into metadata as described in
the MediaSequence documentation. Third, MediaPipe is run to extract subsequences
of frames for subsequent training via _run_mediapipe().
The toy data set is classifying a clip as a panning shot of galaxy or nebula
from videos released under the [Creative Commons Attribution 4.0 International
license](http://creativecommons.org/licenses/by/4.0/) on the ESA/Hubble site.
(The use of these ESA/Hubble materials does not imply the endorsement by
ESA/Hubble or any ESA/Hubble employee of a commercial product or service.) Each
video is split into 5 or 6 ten-second clips with a label of "galaxy" or "nebula"
and downsampled to 10 frames per second. (The last clip for each test example is
only 6 seconds.) There is one video of each class in each of the training and
testing splits.
Reading the data as a tf.data.Dataset is accomplished with the following lines:
demo = DemoDataset("demo_data_path")
dataset = demo.as_dataset("test")
# implement additional processing and batching here
images_and_labels = dataset.make_one_shot_iterator().get_next()
images = images_and_labels["images"]
  labels = images_and_labels["labels"]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import csv
import os
import random
import subprocess
import sys
import tempfile
from absl import app
from absl import flags
from absl import logging
from six.moves import range
from six.moves import urllib
import tensorflow.compat.v1 as tf
from mediapipe.util.sequence import media_sequence as ms
SPLITS = {
"train":
"""url,label index,label string,duration,credits
https://cdn.spacetelescope.org/archives/videos/medium_podcast/heic1608c.mp4,0,nebula,50,"ESA/Hubble; Music: Johan B. Monell"
https://cdn.spacetelescope.org/archives/videos/medium_podcast/heic1712b.mp4,1,galaxy,50,"ESA/Hubble, Digitized Sky Survey, Nick Risinger (skysurvey.org) Music: Johan B Monell"
""",
"test":
"""url,label index,label string,duration,credits
https://cdn.spacetelescope.org/archives/videos/medium_podcast/heic1301b.m4v,0,nebula,56,"NASA, ESA. Acknowledgement: Josh Lake"
https://cdn.spacetelescope.org/archives/videos/medium_podcast/heic1305b.m4v,1,galaxy,56,"NASA, ESA, Digitized Sky Survey 2. Acknowledgement: A. van der Hoeven"
"""
}
NUM_CLASSES = 2
NUM_SHARDS = 2
SECONDS_PER_EXAMPLE = 10
MICROSECONDS_PER_SECOND = 1000000
TF_RECORD_PATTERN = "demo_space_dataset_%s_tfrecord"
GRAPHS = ["clipped_images_from_file_at_24fps.pbtxt"]
class DemoDataset(object):
"""Generates and loads a demo data set."""
def __init__(self, path_to_data):
if not path_to_data:
raise ValueError("You must supply the path to the data directory.")
self.path_to_data = path_to_data
def as_dataset(self,
split,
shuffle=False,
repeat=False,
serialized_prefetch_size=32,
decoded_prefetch_size=32):
"""Returns the dataset as a tf.data.Dataset.
Args:
split: either "train" or "test"
shuffle: if true, shuffles both files and examples.
repeat: if true, repeats the data set forever.
serialized_prefetch_size: the buffer size for reading from disk.
decoded_prefetch_size: the buffer size after decoding.
Returns:
A tf.data.Dataset object with the following structure: {
"images": uint8 tensor, shape [time, height, width, channels]
"labels": one hot encoded label tensor, shape [2]
"id": a unique string id for each example, shape []
}
"""
def parse_fn(sequence_example):
"""Parses a clip classification example."""
context_features = {
ms.get_example_id_key():
ms.get_example_id_default_parser(),
ms.get_clip_label_index_key():
ms.get_clip_label_index_default_parser(),
ms.get_clip_label_string_key():
ms.get_clip_label_string_default_parser()
}
sequence_features = {
ms.get_image_encoded_key(): ms.get_image_encoded_default_parser(),
}
parsed_context, parsed_sequence = tf.io.parse_single_sequence_example(
sequence_example, context_features, sequence_features)
example_id = parsed_context[ms.get_example_id_key()]
classification_target = tf.one_hot(
tf.sparse_tensor_to_dense(
parsed_context[ms.get_clip_label_index_key()]), NUM_CLASSES)
images = tf.map_fn(
tf.image.decode_jpeg,
parsed_sequence[ms.get_image_encoded_key()],
back_prop=False,
dtype=tf.uint8)
return {
"id": example_id,
"labels": classification_target,
"images": images,
}
if split not in SPLITS:
raise ValueError("split '%s' is unknown." % split)
all_shards = tf.io.gfile.glob(
os.path.join(self.path_to_data, TF_RECORD_PATTERN % split + "-*-of-*"))
if shuffle:
random.shuffle(all_shards)
all_shards_dataset = tf.data.Dataset.from_tensor_slices(all_shards)
cycle_length = min(16, len(all_shards))
dataset = all_shards_dataset.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
cycle_length=cycle_length,
block_length=1,
sloppy=True,
buffer_output_elements=serialized_prefetch_size))
dataset = dataset.prefetch(serialized_prefetch_size)
if shuffle:
dataset = dataset.shuffle(serialized_prefetch_size)
if repeat:
dataset = dataset.repeat()
dataset = dataset.map(parse_fn)
dataset = dataset.prefetch(decoded_prefetch_size)
return dataset
def generate_examples(self, path_to_mediapipe_binary,
path_to_graph_directory):
"""Downloads data and generates sharded TFRecords.
Downloads the data files, generates metadata, and processes the metadata
with MediaPipe to produce tf.SequenceExamples for training. The resulting
files can be read with as_dataset(). After running this function the
original data files can be deleted.
Args:
path_to_mediapipe_binary: Path to the compiled binary for the BUILD target
mediapipe/examples/desktop/demo:media_sequence_demo.
path_to_graph_directory: Path to the directory with MediaPipe graphs in
mediapipe/graphs/media_sequence/.
"""
if not path_to_mediapipe_binary:
raise ValueError("You must supply the path to the MediaPipe binary for "
"mediapipe/examples/desktop/demo:media_sequence_demo.")
if not path_to_graph_directory:
raise ValueError(
"You must supply the path to the directory with MediaPipe graphs in "
"mediapipe/graphs/media_sequence/.")
logging.info("Downloading data.")
tf.io.gfile.makedirs(self.path_to_data)
    # six.moves makes urllib.request.urlretrieve available on both Python 2
    # and Python 3, so no version check is needed here.
    urlretrieve = urllib.request.urlretrieve
for split in SPLITS:
reader = csv.DictReader(SPLITS[split].split("\n"))
all_metadata = []
for row in reader:
url = row["url"]
basename = url.split("/")[-1]
local_path = os.path.join(self.path_to_data, basename)
if not tf.io.gfile.exists(local_path):
urlretrieve(url, local_path)
for start_time in range(0, int(row["duration"]), SECONDS_PER_EXAMPLE):
metadata = tf.train.SequenceExample()
ms.set_example_id(bytes23(basename + "_" + str(start_time)),
metadata)
ms.set_clip_data_path(bytes23(local_path), metadata)
ms.set_clip_start_timestamp(start_time * MICROSECONDS_PER_SECOND,
metadata)
ms.set_clip_end_timestamp(
(start_time + SECONDS_PER_EXAMPLE) * MICROSECONDS_PER_SECOND,
metadata)
ms.set_clip_label_index((int(row["label index"]),), metadata)
ms.set_clip_label_string((bytes23(row["label string"]),),
metadata)
all_metadata.append(metadata)
random.seed(47)
random.shuffle(all_metadata)
shard_names = [self._indexed_shard(split, i) for i in range(NUM_SHARDS)]
writers = [tf.io.TFRecordWriter(shard_name) for shard_name in shard_names]
with _close_on_exit(writers) as writers:
for i, seq_ex in enumerate(all_metadata):
for graph in GRAPHS:
graph_path = os.path.join(path_to_graph_directory, graph)
seq_ex = self._run_mediapipe(path_to_mediapipe_binary, seq_ex,
graph_path)
writers[i % len(writers)].write(seq_ex.SerializeToString())
def _indexed_shard(self, split, index):
"""Constructs a sharded filename."""
return os.path.join(
self.path_to_data,
TF_RECORD_PATTERN % split + "-%05d-of-%05d" % (index, NUM_SHARDS))
def _run_mediapipe(self, path_to_mediapipe_binary, sequence_example, graph):
"""Runs MediaPipe over MediaSequence tf.train.SequenceExamples.
Args:
path_to_mediapipe_binary: Path to the compiled binary for the BUILD target
mediapipe/examples/desktop/demo:media_sequence_demo.
sequence_example: The SequenceExample with metadata or partial data file.
graph: The path to the graph that extracts data to add to the
SequenceExample.
Returns:
A copy of the input SequenceExample with additional data fields added
by the MediaPipe graph.
Raises:
RuntimeError: if MediaPipe returns an error or fails to run the graph.
"""
if not path_to_mediapipe_binary:
raise ValueError("--path_to_mediapipe_binary must be specified.")
input_fd, input_filename = tempfile.mkstemp()
output_fd, output_filename = tempfile.mkstemp()
cmd = [
path_to_mediapipe_binary,
"--calculator_graph_config_file=%s" % graph,
"--input_side_packets=input_sequence_example=%s" % input_filename,
"--output_side_packets=output_sequence_example=%s" % output_filename
]
with open(input_filename, "wb") as input_file:
input_file.write(sequence_example.SerializeToString())
mediapipe_output = subprocess.check_output(cmd)
if b"Failed to run the graph" in mediapipe_output:
raise RuntimeError(mediapipe_output)
with open(output_filename, "rb") as output_file:
output_example = tf.train.SequenceExample()
output_example.ParseFromString(output_file.read())
os.close(input_fd)
os.remove(input_filename)
os.close(output_fd)
os.remove(output_filename)
return output_example
def bytes23(string):
"""Creates a bytes string in either Python 2 or 3."""
if sys.version_info >= (3, 0):
return bytes(string, "utf8")
else:
return bytes(string)
@contextlib.contextmanager
def _close_on_exit(writers):
"""Call close on all writers on exit."""
try:
yield writers
finally:
for writer in writers:
writer.close()
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
DemoDataset(flags.FLAGS.path_to_demo_data).generate_examples(
flags.FLAGS.path_to_mediapipe_binary, flags.FLAGS.path_to_graph_directory)
if __name__ == "__main__":
flags.DEFINE_string("path_to_demo_data", "",
"Path to directory to write data to.")
flags.DEFINE_string("path_to_mediapipe_binary", "",
"Path to the MediaPipe run_graph_file_io_main binary.")
flags.DEFINE_string("path_to_graph_directory", "",
"Path to directory containing the graph files.")
app.run(main)
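# A typical invocation sketch (paths are placeholders; the binary path assumes
# the BUILD target mediapipe/examples/desktop/demo:media_sequence_demo was
# built with bazel):
#   python -m mediapipe.examples.desktop.media_sequence.demo_dataset \
#     --path_to_demo_data=/tmp/demo_data/ \
#     --path_to_mediapipe_binary=bazel-bin/mediapipe/examples/desktop/demo/media_sequence_demo \
#     --path_to_graph_directory=mediapipe/graphs/media_sequence/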
|
google/mediapipe
|
mediapipe/examples/desktop/media_sequence/demo_dataset.py
|
Python
|
apache-2.0
| 12,945
|
[
"Galaxy"
] |
bace92ec27e29bc0c1a7b563ac6dae96e38998072048b31ab456be34254d30fb
|
# -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urllib, urlparse, random
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
debridstatus = control.setting('debridsources')
from schism_commons import quality_tag, google_tag, parseDOM, replaceHTMLCodes ,cleantitle_get, cleantitle_get_2, cleantitle_query, get_size, cleantitle_get_full
class source:
def __init__(self):
self.domains = ['rlsbb.online']
self.base_link = 'http://rlsbb.online'
self.search_link = '/?s=%s+%s&submit=Find'
def movie(self, imdb, title, year):
self.genesisreborn_url = []
try:
if not debridstatus == 'true': raise Exception()
self.genesisreborn_url = []
title = cleantitle.getsearch(title)
cleanmovie = cleantitle.get(title)
query = self.search_link % (urllib.quote_plus(title),year)
query = urlparse.urljoin(self.base_link, query)
titlecheck = cleanmovie+year
link = client.request(query)
r = client.parseDOM(link, 'h2', attrs = {'class': 'postTitle'})
# print ("RLSBBONLINE r", r)
for item in r:
href = client.parseDOM(item, 'a', ret = 'href')[0]
item_title = client.parseDOM(item, 'a', ret = 'title')[0]
href = href.encode('utf-8')
item_title = item_title.encode('utf-8')
c_title = cleantitle_get_2(item_title)
if year in item_title:
if titlecheck in c_title:
self.genesisreborn_url.append([href,item_title])
# print "RLSBBONLINE MOVIES %s %s" % (item_title , href)
return self.genesisreborn_url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
self.genesisreborn_url = []
try:
if not debridstatus == 'true': raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
cleanmovie = cleantitle.get(title)
title = cleantitle.getsearch(title)
data['season'], data['episode'] = season, episode
episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
episodecheck = str(episodecheck).lower()
query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
query = self.search_link % (urllib.quote_plus(title),query)
mylink = urlparse.urljoin(self.base_link, query)
link = client.request(mylink)
r = client.parseDOM(link, 'h2', attrs = {'class': 'postTitle'})
titlecheck = cleanmovie+episodecheck
for item in r:
href = client.parseDOM(item, 'a', ret = 'href')[0]
item_title = client.parseDOM(item, 'a', ret = 'title')[0]
href = href.encode('utf-8')
item_title = item_title.encode('utf-8')
c_title = cleantitle.get(item_title)
if titlecheck in c_title:
self.genesisreborn_url.append([href,item_title])
# print ("RLSBBONLINE TV PASSED", self.genesisreborn_url)
return self.genesisreborn_url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
for movielink,title in self.genesisreborn_url:
mylink = client.request(movielink)
r = client.parseDOM(mylink, 'div', attrs = {'class': 'postContent'})
if "1080" in title: quality = "1080p"
elif "720" in title: quality = "HD"
else: quality = "SD"
info = ''
if "hevc" in title.lower(): info = "HEVC"
for items in r:
match = re.compile('href="([^"]+)').findall(items)
for url in match:
# print ("RLSBBONLINE ULRS >>>", url)
if not any(value in url for value in ['sample','uploadkadeh','wordpress','crazy4tv','imdb.com','youtube','userboard','kumpulbagi','mexashare','myvideolink.xyz', 'myvideolinks.xyz' , 'costaction', 'crazydl','.rar', '.RAR', 'ul.to', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up', 'adf.ly','.jpg','.jpeg']):
if any(value in url for value in hostprDict):
try:host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
except: host = 'Videomega'
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Rlsbbonline', 'url': url, 'info': info,'direct': False, 'debridonly': True})
return sources
except:
return sources
def resolve(self, url):
return url
|
azumimuo/family-xbmc-addon
|
plugin.video.genesisreborn/resources/lib/sources/rlsbbonline.py
|
Python
|
gpl-2.0
| 5,339
|
[
"ADF"
] |
700369e24b6962f88d51c8a3810dfd46e5c663d1c56d197a060b7499bfd0889e
|
from math import sin, cos, pi
import numpy as np
from gpaw.fd_operators import NewGUCLaplace as Laplace
from gpaw.fd_operators import Gradient
from gpaw.grid_descriptor import GridDescriptor
from gpaw.mpi import size
cells = [
('distorted hexagonal', 4,
[(1, 0, 0),
(1.02 * cos(pi / 3 - 0.02), 1.02 * sin(pi / 3 - 0.02), 0),
(0, 0, 1.0)]),
('hexagonal', 4,
[(1, 0, 0),
(0.5, 3**0.5 / 2, 0),
(0, 0, 1.1)]),
('fcc', 6,
[(0, 1, 1),
(1, 0, 1),
(1, 1, 0)]),
('fcc-alternative', 6,
[(1, 0, 0),
(0.5, 3**0.5 / 2, 0),
(0.5, 3**0.5 / 6, (2.0 / 3)**0.5)]),
('bcc', 4,
[(-1, 1, 1),
(1, -1, 1),
(1, 1, -1)]),
('sc', 3,
[1.1, 1.02, 1.03]),
('distorted sc', 6,
[(1, 0, 0),
(0.01, 1, 0),
(0, 0.02, 1)]),
('rocksalt', 6,
     [(2 * np.sqrt(1.0 / 3), np.sqrt(1.0 / 8), -np.sqrt(1.0 / 24)),
(2 * np.sqrt(1.0 / 3), -np.sqrt(1.0 / 8), -np.sqrt(1.0 / 24)),
(2 * np.sqrt(1.0 / 3), 0, np.sqrt(1.0 / 6))]),
('nasty', 6,
[(1, 0, 0),
(0.0001, 1.03, 0),
(0.0001, 0.0001, 1.0)]),
('Mike', 6,
5 * np.array([(5.565 / 28, 0, 0),
(0.0001 / 28, 5.565 / 28, 0),
(0.0001 / 24, 0.0001 / 24, 4.684 / 24)])),
('MnO', 6,
[(1, 0.5, 0.5), (0.5, 1, 0.5), (0.5, 0.5, 1)])
]
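# Each entry above is (name, D, cell): D is the number of independent stencil
# directions for that cell, so a Laplacian of order n built on it uses
# D * 2 * n + 1 grid points (checked by the assertion in the loop below).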
if size == 1:
for name, D, cell in cells:
print '------------------'
print name, D
print cell[0]
print cell[1]
print cell[2]
for n in range(1, 6):
N = 2 * n + 2
gd = GridDescriptor((N, N, N), cell)
b_g = gd.zeros()
r_gv = gd.get_grid_point_coordinates().transpose((1, 2, 3, 0))
c_v = gd.cell_cv.sum(0) / 2
r_gv -= c_v
lap = Laplace(gd, n=n)
grad_v = [Gradient(gd, v, n=n) for v in range(3)]
assert lap.npoints == D * 2 * n + 1
for m in range(0, 2 * n + 1):
for ix in range(m + 1):
for iy in range(m - ix + 1):
iz = m - ix - iy
a_g = (r_gv**(ix, iy, iz)).prod(3)
if ix + iy + iz == 2 and max(ix, iy, iz) == 2:
r = 2.0
else:
r = 0.0
lap.apply(a_g, b_g)
e = b_g[n + 1, n + 1, n + 1] - r
assert abs(e) < 2e-12, e
for v in range(3):
grad_v[v].apply(a_g, b_g)
if m == 1 and [ix, iy, iz][v] == 1:
r = 1
else:
r = 0
e = b_g[n + 1, n + 1, n + 1] - r
                            assert abs(e) < 2e-12, (n, ix, iy, iz, r, v, e)
|
qsnake/gpaw
|
gpaw/test/laplace.py
|
Python
|
gpl-3.0
| 2,907
|
[
"GPAW"
] |
4a6a957fc4a269a3b3bc39d7dc7e9307f03b5dbba520a3bfa7fa39310883d729
|
#!/usr/bin/env python
"""
Generate a single CA file with all the PEMs
"""
import sys
from DIRAC import gLogger
from DIRAC.Core.Base.Script import Script
from DIRAC.Core.Security import Utilities
@Script()
def main():
Script.parseCommandLine(ignoreErrors=True)
result = Utilities.generateCAFile()
if not result["OK"]:
gLogger.error(result["Message"])
sys.exit(1)
if __name__ == "__main__":
main()
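# Typical invocation once installed as a DIRAC command (the hyphenated command
# name is inferred from the script filename, so treat it as an assumption):
#   dirac-generate-cas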
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/scripts/dirac_generate_cas.py
|
Python
|
gpl-3.0
| 434
|
[
"DIRAC"
] |
37962f0904d6c2fcd2d4c7de8e67bdfe30dff7fe087ae626a2f40e5e845167a3
|
r"""
I/O Registry (:mod:`skbio.io.registry`)
=======================================
.. currentmodule:: skbio.io.registry
Classes
-------
.. autosummary::
:toctree: generated/
IORegistry
Format
Functions
---------
.. autosummary::
:toctree: generated/
create_format
Exceptions
----------
.. autosummary::
:toctree: generated/
DuplicateRegistrationError
InvalidRegistrationError
Creating a new format for scikit-bio
------------------------------------
scikit-bio makes it simple to add new file formats to its I/O registry.
scikit-bio maintains a singleton of the :class:`IORegistry` class called
`io_registry`. This is where all scikit-bio file formats are registered. One
could also instantiate their own :class:`IORegistry`, but that is not the focus
of this tutorial.
The first step to creating a new format is to add a submodule in
`skbio/io/format/` named after the file format you are implementing.
For example, if the format you are implementing is called `myformat` then you
would create a file called `skbio/io/format/myformat.py`.
The next step is to import the :func:`create_format` factory from
:mod:`skbio.io`. This will allow you to create a new :class:`Format` object
that `io_registry` will know about.
Ideally, you should give the result of :func:`create_format` the same name as
your file. For example:
.. code-block:: python
from skbio.io import create_format
myformat = create_format('myformat')
The `myformat` object is what we will use to register our new functionality.
At this point you should evaluate whether your format is binary or text.
If your format is binary, your :func:`create_format` call should look like
this:
.. code-block:: python
myformat = create_format('myformat', encoding='binary')
Alternatively if your format is text and has a specific encoding or newline
handling you can also specify that:
.. code-block:: python
myformat = create_format('myformat', encoding='ascii', newline='\n')
This will ensure that our registry will open files with a default encoding of
`'ascii'` for `'myformat'` and expect all newlines to be `'\n'` characters.
Having worked out these details, we are ready to register the actual
functionality of our format (e.g., sniffer, readers, and writers).
To create a sniffer simply decorate the following onto your sniffer function:
.. code-block:: python
@myformat.sniffer()
def _myformat_sniffer(fh):
# do something with `fh` to determine the membership of the file
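A sniffer must return a tuple of ``(bool, dict)``: whether the file appears to
be the format, and any suggested keyword arguments for the reader. A minimal
sketch (the ``#myformat`` header check is a hypothetical example):
.. code-block:: python
    @myformat.sniffer()
    def _myformat_sniffer(fh):
        if fh.readline().startswith('#myformat'):
            return True, {}
        return False, {}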
For further details on sniffer functions see :func:`Format.sniffer`.
Creating a reader is very similar, but has one difference:
.. code-block:: python
@myformat.reader(SomeSkbioClass)
def _myformat_to_some_skbio_class(fh, kwarg1='default', extra=FileSentinel):
# parse `fh` and return a SomeSkbioClass instance here
# `extra` will also be an open filehandle if provided else None
Here we bound a function to a specific class. We also demonstrated using
our FileSentinel object to indicate to the registry that this reader can take
auxiliary files that should be handled in the same way as the primary file.
For further details on reader functions see :func:`Format.reader`.
Creating a writer is about the same:
.. code-block:: python
@myformat.writer(SomeSkbioClass)
def _some_skbio_class_to_myformat(obj, fh, kwarg1='whatever',
extra=FileSentinel):
# write the contents of `obj` into `fh` and whatever else into `extra`
# do not return anything, it will be ignored
This is exactly the same as the `reader` above just in reverse, we also
receive the object we are writing as the first parameter instead of the file
(which is the second one). For further details on writer functions see
:func:`Format.writer`.
.. note:: When raising errors in readers and writers, the error should be a
subclass of ``FileFormatError`` specific to your new format.
Once you are satisfied with the functionality, you will need to ensure that
`skbio/io/__init__.py` contains an import of your new submodule so the
decorators are executed. Add the function
``import_module('skbio.io.format.myformat')`` with your module name to the
existing list.
.. note:: Because scikit-bio handles all of the I/O boilerplate, you only need
to unit-test the actual business logic of your `readers`, `writers`, and
`sniffers`.
Reserved Keyword Arguments
--------------------------
The following keyword args may not be used when defining new `readers` or
`writers` as they already have special meaning to the registry system:
- `format`
- `into`
- `verify`
- `mode`
- `encoding`
- `errors`
- `newline`
- `compression`
- `compresslevel`
The following are not yet used but should be avoided as well:
- `auth`
- `user`
- `password`
- `buffering`
- `buffer_size`
- `closefd`
- `exclusive`
- `append`
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from warnings import warn
import types
import traceback
import itertools
import inspect
from functools import wraps
from future.builtins import zip
from ._exception import DuplicateRegistrationError, InvalidRegistrationError
from . import (UnrecognizedFormatError, ArgumentOverrideWarning,
FormatIdentificationWarning)
from .util import _resolve_file, open_file, open_files, _d as _open_kwargs
from skbio.util._misc import make_sentinel, find_sentinels
from skbio.util._decorator import stable, classonlymethod
FileSentinel = make_sentinel("FileSentinel")
class IORegistry(object):
"""Create a registry of formats and implementations which map to classes.
"""
@stable(as_of="0.4.0")
def __init__(self):
        # This separation of binary and text formats is useful because there
        # are many situations where we may have received a text file. When this
# happens, the binary data fundamentally does not exist. We could
# assume encoding should be interpreted in reverse, however this misses
# the bigger point: why would the user ever want text to be treated as
# binary? They already went through the effort to hand us text.
# Therefore, during format resolution, we should skip the binary
# formats if they are irrelevant. (They are incompatible with such a
# filehandle anyways.)
self._binary_formats = {}
self._text_formats = {}
self._lookups = (self._binary_formats, self._text_formats)
@stable(as_of="0.4.0")
def create_format(self, *args, **kwargs):
"""A simple factory for creating new file formats.
        This will automatically register the format with this registry.
All arguments are passed through to the Format constructor.
Returns
-------
Format
A new format that is registered with the registry.
"""
format = Format(*args, **kwargs)
self.add_format(format)
return format
@stable(as_of="0.4.0")
def add_format(self, format_object):
"""Add a format to the registry.
Parameters
----------
format_object : Format
The format to add to the registry.
"""
# See comment in the constructor for an explanation for why this split
# occurs.
name = format_object.name
if name in self._binary_formats or name in self._text_formats:
raise DuplicateRegistrationError("A format already exists with"
" that name: %s" % name)
if format_object.is_binary_format:
self._binary_formats[name] = format_object
else:
self._text_formats[name] = format_object
@stable(as_of="0.4.0")
def get_sniffer(self, format_name):
"""Locate the sniffer for a format.
Parameters
----------
format_name : str
The name of the format to lookup.
Returns
-------
function or None
The sniffer associated with `format_name`
"""
for lookup in self._lookups:
if format_name in lookup:
return lookup[format_name].sniffer_function
return None
@stable(as_of="0.4.0")
def get_reader(self, format_name, cls):
"""Locate the reader for a format and class.
Parameters
----------
format_name : str
The name of the format to lookup.
cls : type or None
The class which the reader will return an instance of. If `cls` is
None, the reader will return a generator.
Default is None.
Returns
-------
function or None
The reader associated with `format_name` and `cls`
"""
return self._get_rw(format_name, cls, 'readers')
@stable(as_of="0.4.0")
def get_writer(self, format_name, cls):
"""Locate the writer for a format and class.
Parameters
----------
format_name : str
The name of the format to lookup.
cls : type or None
The class which the writer will expect an instance of. If `cls` is
None, the writer will expect a generator.
Default is None.
Returns
-------
function or None
The writer associated with `format_name` and `cls`
"""
return self._get_rw(format_name, cls, 'writers')
def _get_rw(self, format_name, cls, lookup_name):
for lookup in self._lookups:
if format_name in lookup:
format_lookup = getattr(lookup[format_name], lookup_name)
if cls in format_lookup:
return format_lookup[cls]
return None
@stable(as_of="0.4.0")
def list_read_formats(self, cls):
"""Return a list of available read formats for a given `cls` type.
Parameters
----------
cls : type
The class which will be used to determine what read formats exist
for an instance of `cls`.
Returns
-------
list
A list of available read formats for an instance of `cls`. List may
be empty.
"""
return list(self._iter_rw_formats(cls, 'readers'))
@stable(as_of="0.4.0")
def list_write_formats(self, cls):
"""Return a list of available write formats for a given `cls` type.
Parameters
----------
cls : type
The class which will be used to determine what write formats exist
for an instance of `cls`.
Returns
-------
list
A list of available write formats for an instance of `cls`. List
may be empty.
"""
return list(self._iter_rw_formats(cls, 'writers'))
def _iter_rw_formats(self, cls, lookup_name):
for lookup in self._lookups:
for format in lookup.values():
if cls in getattr(format, lookup_name):
yield format.name
@stable(as_of="0.4.0")
def sniff(self, file, **kwargs):
"""Detect the format of a given `file` and suggest kwargs for reading.
Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
The file to sniff. Something that is understood by `skbio.io.open`.
kwargs : dict, optional
Keyword arguments will be passed to `skbio.io.open`.
Returns
-------
(str, dict)
The name of the format of the file and any suggested kwargs for
use with the corresponding reader.
Raises
------
UnrecognizedFormatError
This occurs when the format is not 'claimed' by any registered
sniffer or when the format is ambiguous and has been 'claimed' by
more than one sniffer.
"""
        # By resolving the input here, we have the opportunity to reuse the
# file (which is potentially ephemeral). Each sniffer will also resolve
# the file, but that call will short-circuit and won't claim
# responsibility for closing the file. This means that the file
# should only close after leaving this context. This is also the reason
# that we have to use SaneTextIOWrapper because each sniffer will
# wrap the file to produce an appropriate default encoding for their
# format (if unspecified). This results in the SaneTextIOWrapper being
# garbage collected (using io.TextIOBase results in close being called
# on our buffer by the deconstructor which we wanted to share with the
# next sniffer)
with _resolve_file(file, mode='r', **kwargs) as (fh, _,
is_binary_file):
# tell may fail noisily if the user provided a TextIOBase or
# BufferedReader which has already been iterated over (via next()).
matches = []
backup = fh.tell()
if is_binary_file and kwargs.get('encoding', 'binary') == 'binary':
matches = self._find_matches(fh, self._binary_formats,
**kwargs)
if kwargs.get('encoding', None) != 'binary':
# We can always turn a binary file into a text file, but the
# reverse doesn't make sense.
matches += self._find_matches(fh, self._text_formats, **kwargs)
fh.seek(backup)
elif not is_binary_file:
raise ValueError("Cannot decode text source (%r) as binary."
% file)
# else we are a binary_file and our encoding did not exclude binary
# so we have already handled that condition
if len(matches) > 1:
raise UnrecognizedFormatError("File format for %r is ambiguous,"
" may be one of: %r"
% (file, [m for m, s in matches]))
elif len(matches) == 0:
raise UnrecognizedFormatError("Could not detect the format of %r"
% file)
return matches[0]
def _find_matches(self, file, lookup, **kwargs):
matches = []
for format in lookup.values():
if format.sniffer_function is not None:
is_format, skwargs = format.sniffer_function(file, **kwargs)
file.seek(0)
if is_format:
matches.append((format.name, skwargs))
return matches
@stable(as_of="0.4.0")
def read(self, file, format=None, into=None, verify=True, **kwargs):
"""Read `file` as `format` into an object.
Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
The file to read. Something that is understood by `skbio.io.open`.
format : str, optional
The format of the file if known. If None, the format will be
inferred from the file.
into : type or None, optional
The object which will be returned. If None, a generator will be
returned.
verify : bool, optional
When True, will double check the `format` if provided.
kwargs : dict, optional
Keyword arguments will be passed to their respective handlers
(`skbio.io.open` and the reader for `format`)
Returns
-------
object or generator
An instance of `into` if `into` is not None else generator
Raises
------
ValueError
Raised when `format` and `into` are both None.
UnrecognizedFormatError
Raised when a reader could not be found for a given `format` or the
format could not be guessed.
FormatIdentificationWarning
Raised when `verify` is True and the sniffer of a `format` did
not agree that `file` is a member of `format`
ArgumentOverrideWarning
Raised when `verify` is True and a user-supplied argument is
overriding the suggestion provided by the sniffer of `format`.
"""
# Context managers do not compose well with generators. We have to
# duplicate the logic so that the file will stay open while yielding.
# Otherwise the context exits as soon as the generator is returned
# (making any iteration fail as the file is closed from its
# perspective).
if into is None:
if format is None:
raise ValueError("`into` and `format` cannot both be None")
gen = self._read_gen(file, format, into, verify, kwargs)
# This is done so that any errors occur immediately instead of
# on the first call from __iter__
            # eta-reduction is possible, but we want the type to be
# GeneratorType
return (x for x in itertools.chain([next(gen)], gen))
else:
return self._read_ret(file, format, into, verify, kwargs)
def _read_ret(self, file, fmt, into, verify, kwargs):
io_kwargs = self._find_io_kwargs(kwargs)
with _resolve_file(file, **io_kwargs) as (file, _, _):
reader, kwargs = self._init_reader(file, fmt, into, verify, kwargs,
io_kwargs)
return reader(file, **kwargs)
def _read_gen(self, file, fmt, into, verify, kwargs):
io_kwargs = self._find_io_kwargs(kwargs)
# We needed to get the io_kwargs from kwargs for things like
# _resolve_file and for verifying a format.
# kwargs should still retain the contents of io_kwargs because the
# actual reader will also need them.
with _resolve_file(file, **io_kwargs) as (file, _, _):
reader, kwargs = self._init_reader(file, fmt, into, verify, kwargs,
io_kwargs)
generator = reader(file, **kwargs)
            # Let the underlying generator's exhaustion end this one too: a
            # bare `yield next(generator)` would leak StopIteration, which
            # PEP 479 turns into a RuntimeError on modern Python.
            try:
                while True:
                    yield next(generator)
            except StopIteration:
                return
def _find_io_kwargs(self, kwargs):
return {k: kwargs[k] for k in _open_kwargs if k in kwargs}
def _init_reader(self, file, fmt, into, verify, kwargs, io_kwargs):
skwargs = {}
if fmt is None:
fmt, skwargs = self.sniff(file, **io_kwargs)
elif verify:
sniffer = self.get_sniffer(fmt)
if sniffer is not None:
backup = file.tell()
is_format, skwargs = sniffer(file, **io_kwargs)
file.seek(backup)
if not is_format:
warn("%r does not look like a %s file"
% (file, fmt), FormatIdentificationWarning)
for key in skwargs:
if key not in kwargs:
kwargs[key] = skwargs[key]
elif kwargs[key] != skwargs[key]:
warn('Best guess was: %s=%r, continuing with user'
' supplied: %r' % (key, skwargs[key],
kwargs[key]),
ArgumentOverrideWarning)
reader = self.get_reader(fmt, into)
if reader is None:
raise UnrecognizedFormatError(
"Cannot read %r from %r, no %s reader found." %
(fmt, file, into.__name__ if into else 'generator'))
return reader, kwargs
@stable(as_of="0.4.0")
def write(self, obj, format, into, **kwargs):
"""Write `obj` as `format` into a file.
Parameters
----------
obj : object
The object to write as `format`
format : str
The format to write `obj` as
into : openable (filepath, URL, filehandle, etc.)
What to write `obj` to. Something that is understood by
`skbio.io.open`.
kwargs : dict, optional
Keyword arguments will be passed to their respective handlers
(`skbio.io.open` and the writer for `format`)
Returns
-------
openable (filepath, URL, filehandle, etc.)
Will pass back the user argument for `into` as a convenience.
Raises
------
UnrecognizedFormatError
Raised when a writer for writing `obj` as `format` could not be
found.
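        Examples
        --------
        A minimal usage sketch (the format name and target are
        hypothetical; any registered writer behaves the same way)::

            registry.write(obj, format='myformat', into='output.txt')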
"""
# The simplest functionality here.
cls = None
if not isinstance(obj, types.GeneratorType):
cls = obj.__class__
writer = self.get_writer(format, cls)
if writer is None:
raise UnrecognizedFormatError(
"Cannot write %r into %r, no %s writer found." %
(format, into, obj.__class__.__name__))
writer(obj, into, **kwargs)
return into
@stable(as_of="0.4.0")
def monkey_patch(self):
"""Monkey-patch `read` and `write` methods onto registered classes.
Will modify classes which have been registered to a reader or writer
to have `read` and `write` methods which will contain documentation
specifying useable formats for that class.
The actual functionality will be a pass-through to `skbio.io.read`
and `skbio.io.write` respectively.
"""
reads = set()
writes = set()
for lookup in self._lookups:
for format in lookup.values():
reads |= format.monkey_patched_readers
writes |= format.monkey_patched_writers
for cls in reads:
self._apply_read(cls)
for cls in writes:
self._apply_write(cls)
def _apply_read(registry, cls):
"""Add read method if any formats have a reader for `cls`."""
read_formats = registry.list_read_formats(cls)
@classonlymethod
def read(cls, file, format=None, **kwargs):
return registry.read(file, into=cls, format=format, **kwargs)
imports = registry._import_paths(read_formats)
doc_list = registry._formats_for_docs(read_formats, imports)
read.__func__.__doc__ = _read_docstring % {
'name': cls.__name__,
'list': doc_list,
'see': '\n'.join(imports)
}
cls.read = read
def _apply_write(registry, cls):
"""Add write method if any formats have a writer for `cls`."""
write_formats = registry.list_write_formats(cls)
if not hasattr(cls, 'default_write_format'):
raise NotImplementedError(
"Classes with registered writers must provide a "
"`default_write_format`. Please add `default_write_format`"
" to '%s'." % cls.__name__)
def write(self, file, format=cls.default_write_format, **kwargs):
return registry.write(self, into=file, format=format, **kwargs)
imports = registry._import_paths(write_formats)
doc_list = registry._formats_for_docs(write_formats, imports)
write.__doc__ = _write_docstring % {
'name': cls.__name__,
'list': doc_list,
'see': '\n'.join(imports),
'default': cls.default_write_format
}
cls.write = write
def _import_paths(self, formats):
lines = []
for fmt in formats:
lines.append("skbio.io.format." + fmt)
return lines
def _formats_for_docs(self, formats, imports):
lines = []
for fmt, imp in zip(formats, imports):
lines.append("- ``'%s'`` (:mod:`%s`)" % (fmt, imp))
return '\n'.join(lines)
_read_docstring = """Create a new ``%(name)s`` instance from a file.
This is a convenience method for :func:`skbio.io.registry.read`. For
more information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.
Supported file formats include:
%(list)s
Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
The location to read the given `format`. Something that is
        understood by :func:`skbio.io.util.open`. Filehandles are not
        automatically closed; closing them is the responsibility of the
        caller.
format : str, optional
The format must be a format name with a reader for ``%(name)s``.
If a `format` is not provided or is None, it will attempt to
guess the format.
kwargs : dict, optional
Keyword arguments passed to :func:`skbio.io.registry.read` and
the file format reader for ``%(name)s``.
Returns
-------
%(name)s
A new instance.
See Also
--------
write
skbio.io.registry.read
skbio.io.util.open
%(see)s
"""
_write_docstring = """Write an instance of ``%(name)s`` to a file.
This is a convenience method for :func:`skbio.io.registry.write`.
For more information about the I/O system in scikit-bio, please
see :mod:`skbio.io`.
Supported file formats include:
%(list)s
Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
The location to write the given `format` into. Something
        that is understood by :func:`skbio.io.util.open`. Filehandles
        are not automatically closed; closing them is the responsibility
        of the caller.
format : str
The format must be a registered format name with a writer for
``%(name)s``.
Default is `'%(default)s'`.
kwargs : dict, optional
Keyword arguments passed to :func:`skbio.io.registry.write`
and the file format writer.
See Also
--------
read
skbio.io.registry.write
skbio.io.util.open
%(see)s
"""
class Format(object):
"""Defines a format on which readers/writers/sniffer can be registered.
Parameters
----------
name : str
The name of this format.
encoding : str, optional
What the default encoding of this format is. If set to 'binary' then
all registered handlers will receive an :class:`io.BufferedReader` or
:class:`io.BufferedWriter` instead of an :class:`io.TextIOBase`. The
user will also be unable to override the encoding in that case.
newline : str, optional
What the default newline handling of this format is. Default is to use
universal newline handling.
"""
@property
@stable(as_of="0.4.0")
def name(self):
"""The name of this format."""
return self._name
@property
@stable(as_of="0.4.0")
def is_binary_format(self):
"""Return True if this is a binary format."""
return self._encoding == 'binary'
@property
@stable(as_of="0.4.0")
def sniffer_function(self):
"""The sniffer function associated with this format."""
return self._sniffer_function
@property
@stable(as_of="0.4.0")
def readers(self):
"""Dictionary that maps classes to their writers for this format."""
return self._readers
@property
@stable(as_of="0.4.0")
def writers(self):
"""Dictionary that maps classes to their writers for this format."""
return self._writers
@property
@stable(as_of="0.4.0")
def monkey_patched_readers(self):
"""Set of classes bound to readers to monkey patch."""
return self._monkey_patch['read']
@property
@stable(as_of="0.4.0")
def monkey_patched_writers(self):
"""Set of classes bound to writers to monkey patch."""
return self._monkey_patch['write']
def __init__(self, name, encoding=None, newline=None):
self._encoding = encoding
self._newline = newline
self._name = name
self._sniffer_function = None
self._readers = {}
self._writers = {}
self._monkey_patch = {'read': set(), 'write': set()}
@stable(as_of="0.4.0")
def sniffer(self, override=False):
"""Decorate a function to act as the sniffer for this format.
The function should take one argument which will be an implementation
of either :class:`io.TextIOBase` or :class:`io.BufferedReader`
depending on if the format is text or binary, respectively.
The sniffer will always receive a filehandle which is pointing to the
beginning of the file. It must return a tuple of bool and a dict of
suggested keyword arguments (if any) to pass to the reader.
.. note:: Keyword arguments are not permitted in `sniffers`.
`Sniffers` may not raise exceptions; if an exception is thrown by a
`sniffer`, the user will be asked to report it on our `issue tracker
<https://github.com/biocore/scikit-bio/issues/>`_.
Parameters
----------
override : bool, optional
            If True, the existing sniffer will be overridden.
Raises
------
DuplicateRegistrationError
When `override` is False and a sniffer is already registered for
this format.
Examples
--------
>>> from skbio.io.registry import Format
>>> # If developing a new format for skbio, use the create_format()
>>> # factory instead of this constructor.
>>> myformat = Format('myformat')
>>> @myformat.sniffer()
... def myformat_sniffer(fh):
... check = fh.read(8) == "myformat"
... if check:
... version = int(fh.read(1))
... return True, {'version': version}
... return False, {}
...
>>> myformat_sniffer(["myformat2\\n", "some content\\n"])
(True, {'version': 2})
>>> myformat_sniffer(["something else\\n"])
(False, {})
"""
        if not isinstance(override, bool):
            raise InvalidRegistrationError("`override` must be a bool not %r"
                                           % override)
if not override and self._sniffer_function is not None:
raise DuplicateRegistrationError("A sniffer is already registered"
" to format: %s" % self._name)
def decorator(sniffer):
@wraps(sniffer)
def wrapped_sniffer(file, encoding=self._encoding, errors='ignore',
newline=self._newline, **kwargs):
self._validate_encoding(encoding)
if encoding == 'binary':
                    # `errors` is irrelevant for binary files, so reset it
                    # to the default to prevent a usage exception in open.
errors = _open_kwargs['errors']
with open_file(file, mode='r', encoding=encoding,
newline=newline, errors=errors, **kwargs) as fh:
try:
                        # Some formats may have headers which indicate their
                        # format; sniffers should be able to rely on the
                        # filehandle pointing at the beginning of the file.
fh.seek(0)
return sniffer(fh)
except UnicodeDecodeError:
pass
except Exception:
warn("'%s' has encountered a problem.\nPlease"
" send the following to our issue tracker at\n"
"https://github.com/biocore/scikit-bio/issues\n\n"
"%s" % (sniffer.__name__, traceback.format_exc()),
FormatIdentificationWarning)
return False, {}
self._sniffer_function = wrapped_sniffer
return wrapped_sniffer
return decorator
@stable(as_of="0.4.0")
def reader(self, cls, monkey_patch=True, override=False):
"""Decorate a function to act as the reader for a class in this format.
The function should take an argument which will be an implementation
of either :class:`io.TextIOBase` or :class:`io.BufferedReader`
depending on if the format is text or binary, respectively. Any kwargs
given by the user which are not handled by :func:`skbio.io.util.open`
will be passed into the function. Any kwarg with a default of
`FileSentinel` will transform user input for that parameter into a
filehandle or `None` if not provided.
Parameters
----------
cls : type or None
The class which the function will be registered to handle. If
None, it is assumed that the function will produce a generator.
monkey_patch : bool, optional
Whether to allow an IORegistry to attach a `read` method to `cls`
with this format listed as an option.
override : bool, optional
If True, any existing readers for `cls` in this format will be
            overridden.
Raises
------
DuplicateRegistrationError
When `override` is False and a reader is already registered to
`cls` for this format.
Examples
--------
>>> from skbio.io.registry import Format, IORegistry
>>> registry = IORegistry()
>>> myformat = Format('myformat')
>>> registry.add_format(myformat)
>>> # If developing a new format for skbio, use the create_format()
>>> # factory instead of the above.
>>> class MyObject(object):
... def __init__(self, content):
... self.content = content
...
>>> @myformat.reader(MyObject)
... def myformat_reader(fh):
... return MyObject(fh.readlines()[1:])
...
>>> registry.monkey_patch() # If developing skbio, this isn't needed
>>> MyObject.read(["myformat2\\n", "some content here!\\n"],
... format='myformat').content
['some content here!\\n']
"""
self._check_registration(cls)
def decorator(reader_function):
file_params = find_sentinels(reader_function, FileSentinel)
# This split has to occur for the same reason as in IORegistry.read
if cls is not None:
@wraps(reader_function)
def wrapped_reader(file, encoding=self._encoding,
newline=self._newline, **kwargs):
file_keys, files, io_kwargs = self._setup_locals(
file_params, file, encoding, newline, kwargs)
with open_files(files, mode='r', **io_kwargs) as fhs:
                        # The primary file is at the end of fhs because
                        # append is cheaper than insert.
kwargs.update(zip(file_keys, fhs[:-1]))
return reader_function(fhs[-1], **kwargs)
else:
@wraps(reader_function)
def wrapped_reader(file, encoding=self._encoding,
newline=self._newline, **kwargs):
file_keys, files, io_kwargs = self._setup_locals(
file_params, file, encoding, newline, kwargs)
with open_files(files, mode='r', **io_kwargs) as fhs:
kwargs.update(zip(file_keys, fhs[:-1]))
generator = reader_function(fhs[-1], **kwargs)
                        # Delegate with 'yield from' so the files stay open
                        # while the caller iterates and iteration ends
                        # cleanly at exhaustion.
                        yield from generator
self._add_reader(cls, wrapped_reader, monkey_patch, override)
return wrapped_reader
return decorator
@stable(as_of="0.4.0")
def writer(self, cls, monkey_patch=True, override=False):
"""Decorate a function to act as the writer for a class in this format.
The function should take an instance of `cls` as its first argument
and the second argument is a filehandle which will be an implementation
of either :class:`io.TextIOBase` or :class:`io.BufferedWriter`
depending on if the format is text or binary, respectively. Any kwargs
given by the user which are not handled by :func:`skbio.io.util.open`
will be passed into the function. Any kwarg with a default of
`FileSentinel` will transform user input for that parameter into a
filehandle or `None` if not provided.
Parameters
----------
cls : type or None
The class which the function will be registered to handle. If
None, it is assumed that the function will consume a generator.
monkey_patch : bool, optional
Whether to allow an IORegistry to attach a `write` method to `cls`
with this format listed as an option.
override : bool, optional
If True, any existing writers for `cls` in this format will be
            overridden.
Raises
------
DuplicateRegistrationError
When `override` is False and a writer is already registered to
`cls` for this format.
Examples
--------
>>> from skbio.io.registry import Format, IORegistry
>>> registry = IORegistry()
>>> myformat = Format('myformat')
>>> registry.add_format(myformat)
>>> # If developing a new format for skbio, use the create_format()
>>> # factory instead of the above.
>>> class MyObject(object):
... default_write_format = 'myformat'
... def __init__(self, content):
... self.content = content
...
>>> @myformat.writer(MyObject)
        ... def myformat_writer(obj, fh):
... fh.write("myformat2\\n")
... for c in obj.content:
... fh.write(c)
...
>>> registry.monkey_patch() # If developing skbio, this isn't needed
>>> obj = MyObject(["some content here!\\n"])
>>> obj.write([], format='myformat')
['myformat2\\n', 'some content here!\\n']
"""
self._check_registration(cls)
def decorator(writer_function):
file_params = find_sentinels(writer_function, FileSentinel)
@wraps(writer_function)
def wrapped_writer(obj, file, encoding=self._encoding,
newline=self._newline, **kwargs):
file_keys, files, io_kwargs = self._setup_locals(
file_params, file, encoding, newline, kwargs)
with open_files(files, mode='w', **io_kwargs) as fhs:
kwargs.update(zip(file_keys, fhs[:-1]))
writer_function(obj, fhs[-1], **kwargs)
self._add_writer(cls, wrapped_writer, monkey_patch, override)
return wrapped_writer
return decorator
def _check_registration(self, cls):
if cls is not None and not inspect.isclass(cls):
raise InvalidRegistrationError("`cls` must be a class or None, not"
" %r" % cls)
def _setup_locals(self, file_params, file, encoding, newline, kwargs):
self._validate_encoding(encoding)
io_kwargs = self._pop_io_kwargs(kwargs, encoding, newline)
file_keys, files = self._setup_file_args(kwargs, file_params)
files.append(file)
return file_keys, files, io_kwargs
def _validate_encoding(self, encoding):
if encoding != self._encoding:
if self._encoding == 'binary':
raise ValueError("Encoding must be 'binary' for %r"
% self.name)
if encoding == 'binary':
raise ValueError("Encoding must not be 'binary' for %r"
% self.name)
def _pop_io_kwargs(self, kwargs, encoding, newline):
io_kwargs = dict(encoding=encoding, newline=newline)
for key in _open_kwargs:
if key in kwargs:
io_kwargs[key] = kwargs.pop(key)
return io_kwargs
def _setup_file_args(self, kwargs, file_params):
file_keys = []
files = []
for param in file_params:
arg = kwargs.get(param, None)
if arg is not None:
file_keys.append(param)
files.append(arg)
else:
# set to None to mask FileSentinel when user neglected argument
kwargs[param] = None
return file_keys, files
def _add_writer(self, cls, writer, monkey_patch, override):
if cls in self._writers and not override:
raise DuplicateRegistrationError("There is already a writer"
" registered to %s in format: %s"
% (cls, self._name))
self._writers[cls] = writer
if monkey_patch and cls is not None:
self._monkey_patch['write'].add(cls)
def _add_reader(self, cls, reader, monkey_patch, override):
if cls in self._readers and not override:
raise DuplicateRegistrationError("There is already a reader"
" registered to %s in format: %s"
% (cls, self._name))
self._readers[cls] = reader
if monkey_patch and cls is not None:
self._monkey_patch['read'].add(cls)
io_registry = IORegistry()
@wraps(IORegistry.sniff)
def sniff(file, **kwargs):
return io_registry.sniff(file, **kwargs)
@wraps(IORegistry.read)
def read(file, format=None, into=None, verify=True, **kwargs):
return io_registry.read(file, format=format, into=into, verify=verify,
**kwargs)
@wraps(IORegistry.write)
def write(obj, format, into, **kwargs):
return io_registry.write(obj, format, into, **kwargs)
@wraps(IORegistry.create_format)
def create_format(*args, **kwargs):
return io_registry.create_format(*args, **kwargs)
|
jdrudolph/scikit-bio
|
skbio/io/registry.py
|
Python
|
bsd-3-clause
| 42,124
|
[
"scikit-bio"
] |
0ccc37413bd1aa2ef367aa48aab3362a834bdc12e6244ac73df0d071f1dfd729
|
#!/usr/bin/env python3
# Copyright (C) 2015-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# #
# This is an example for an MD simulation of a simple Lennard-Jones #
# fluid with ESPResSo++. #
# #
###########################################################################
"""
We will start with particles at random positions within
the simulation box interacting via a shifted Lennard-Jones type potential
with an interaction cutoff at 2.5.
Newton's equations of motion are integrated with a Velocity-Verlet integrator.
The canonical (NVT) ensemble is realized by using a Langevin thermostat.
In order to prevent explosions due to strongly overlapping volumes of
randomly placed particles, the system needs to be warmed up first.
Warm-up is accomplished by using a repelling-only LJ interaction
(cutoff=1.12246, shift=0.25) with a force capping at radius 0.6
and initial small LJ epsilon value of 0.1.
During warmup epsilon is gradually increased to its final value 1.0.
After warm-up the system is equilibrated using the full uncapped LJ Potential.
If the system still explodes during warmup or equilibration, the warmup
time can be extended by increasing warmup_nloops, and capradius can be
set to another value. Depending on the system (number of particles, density, ...)
it could also be necessary to vary sigma during warmup.
The simulation consists of the following steps:
1. specification of the main simulation parameters
2. setup of the system, random number generator and parallelisation
3. setup of the integrator and simulation ensemble
4. adding the particles
5. setting up interaction potential for the warmup
6. running the warmup loop
7. setting up interaction potential for the equilibration
8. running the equilibration loop
9. writing configuration to a file
"""
import espressopp
########################################################################
# 1. specification of the main simulation parameters #
########################################################################
# number of particles
Npart = 32768
# density of particles
rho = 0.8442
# length of simulation box
L = pow(Npart/rho, 1.0/3.0)
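# (e.g. Npart = 32768 and rho = 0.8442 give L of about 33.86)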
# cubic simulation box of size L
box = (L, L, L)
# cutoff of the short range potential
r_cutoff = 2.5
# VerletList skin size (also used for domain decomposition)
skin = 0.4
# the temperature of the system
temperature = 1.0
# time step for the velocity verlet integrator
dt = 0.005
# Lennard Jones epsilon during equilibration phase
epsilon = 1.0
# Lennard Jones sigma during warmup and equilibration
sigma = 1.0
# interaction cut-off used during the warm-up phase
warmup_cutoff = pow(2.0, 1.0/6.0)
# number of warm-up loops
warmup_nloops = 100
# number of integration steps performed in each warm-up loop
warmup_isteps = 200
# total number of integration steps of the warm-up phase
total_warmup_steps = warmup_nloops * warmup_isteps
# initial value for LJ epsilon at beginning of warmup
epsilon_start = 0.1
# final value for LJ epsilon at end of warmup
epsilon_end = 1.0
# increment epsilon by epsilon delta after each warmup_loop
epsilon_delta = (epsilon_end - epsilon_start) / warmup_nloops
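# (e.g. (1.0 - 0.1) / 100 warm-up loops gives an increment of 0.009 per loop)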
# force capping radius
capradius = 0.6
# number of equilibration loops
equil_nloops = 100
# number of integration steps performed in each equilibration loop
equil_isteps = 100
# print ESPResSo++ version and compile info
print(espressopp.Version().info())
# print simulation parameters (useful to have them in a log file)
print("Npart = ", Npart)
print("rho = ", rho)
print("L = ", L)
print("box = ", box)
print("r_cutoff = ", r_cutoff)
print("skin = ", skin)
print("temperature = ", temperature)
print("dt = ", dt)
print("epsilon = ", epsilon)
print("sigma = ", sigma)
print("warmup_cutoff = ", warmup_cutoff)
print("warmup_nloops = ", warmup_nloops)
print("warmup_isteps = ", warmup_isteps)
print("total_warmup_steps = ", total_warmup_steps)
print("epsilon_start = ", epsilon_start)
print("epsilon_end = ", epsilon_end)
print("epsilon_delta = ", epsilon_delta)
print("capradius = ", capradius)
print("equil_nloops = ", equil_nloops)
print("equil_isteps = ", equil_isteps)
########################################################################
# 2. setup of the system, random number generator and parallelisation #
########################################################################
# create the basic system
system = espressopp.System()
# use the random number generator that is included within the ESPResSo++ package
system.rng = espressopp.esutil.RNG()
# use orthorhombic periodic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
# set the skin size used for verlet lists and cell sizes
system.skin = skin
# get the number of CPUs to use
NCPUs = espressopp.MPI.COMM_WORLD.size
# calculate a regular 3D grid according to the number of CPUs available
nodeGrid = espressopp.tools.decomp.nodeGrid(NCPUs, box, warmup_cutoff, skin)
# calculate a 3D subgrid to speed up verlet list builds and communication
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, warmup_cutoff, skin)
# create a domain decomposition particle storage with the calculated nodeGrid and cellGrid
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
print("NCPUs = ", NCPUs)
print("nodeGrid = ", nodeGrid)
print("cellGrid = ", cellGrid)
########################################################################
# 3. setup of the integrator and simulation ensemble #
########################################################################
# use a velocity Verlet integration scheme
integrator = espressopp.integrator.VelocityVerlet(system)
# set the integration step
integrator.dt = dt
# use a thermostat if the temperature is set
if temperature is not None:
    # create a Langevin thermostat
thermostat = espressopp.integrator.LangevinThermostat(system)
# set Langevin friction constant
thermostat.gamma = 1.0
# set temperature
thermostat.temperature = temperature
# tell the integrator to use this thermostat
integrator.addExtension(thermostat)
## steps 2. and 3. could be short-cut by the following expression:
## system, integrator = espressopp.standard_system.Default(box, warmup_cutoff, skin, dt, temperature)
########################################################################
# 4. adding the particles #
########################################################################
print("adding ", Npart, " particles to the system ...")
for pid in range(int(Npart)):
# get a 3D random coordinate within the box
pos = system.bc.getRandomPos()
# add a particle with particle id pid and coordinate pos to the system
# coordinates are automatically folded according to periodic boundary conditions
# the following default values are set for each particle:
# (type=0, mass=1.0, velocity=(0,0,0), charge=0.0)
system.storage.addParticle(pid, pos)
# distribute the particles to parallel CPUs
system.storage.decompose()
########################################################################
# 5. setting up interaction potential for the warmup #
########################################################################
# create a verlet list that uses a cutoff radius = warmup_cutoff
# the verlet radius is automatically increased by system.skin (see system setup)
verletlist = espressopp.VerletList(system, warmup_cutoff)
# create a force capped Lennard-Jones potential
# the potential is automatically shifted so that U(r=cutoff) = 0.0
LJpot = espressopp.interaction.LennardJonesCapped(epsilon=epsilon_start, sigma=sigma, cutoff=warmup_cutoff, caprad=capradius, shift='auto')
# create a force capped Lennard-Jones interaction that uses a verlet list
interaction = espressopp.interaction.VerletListLennardJonesCapped(verletlist)
# tell the interaction to use the above defined force capped Lennard-Jones potential
# between 2 particles of type 0
interaction.setPotential(type1=0, type2=0, potential=LJpot)
########################################################################
# 6. running the warmup loop
########################################################################
# make the force capping interaction known to the system
system.addInteraction(interaction)
print("starting warm-up ...")
# print some status information (time, measured temperature, pressure,
# pressure tensor (xy only), kinetic energy, potential energy, total energy, boxsize)
espressopp.tools.analyse.info(system, integrator)
for step in range(warmup_nloops):
    # perform warmup_isteps integration steps
integrator.run(warmup_isteps)
    # increase the Lennard-Jones epsilon of the potential
LJpot.epsilon += epsilon_delta
# update the type0-type0 interaction to use the new values of LJpot
interaction.setPotential(type1=0, type2=0, potential=LJpot)
# print status info
espressopp.tools.analyse.info(system, integrator)
print("warmup finished")
# remove the force capping interaction from the system
system.removeInteraction(0)
# the equilibration uses a different interaction cutoff; therefore the
# current verlet list is not needed any more and would only waste CPU time
verletlist.disconnect()
########################################################################
# 7. setting up interaction potential for the equilibration #
########################################################################
# create a new verlet list that uses a cutoff radius = r_cutoff
# the verlet radius is automatically increased by system.skin (see system setup)
verletlist = espressopp.VerletList(system, r_cutoff)
# define a Lennard-Jones interaction that uses a verlet list
interaction = espressopp.interaction.VerletListLennardJones(verletlist)
# use a Lennard-Jones potential between 2 particles of type 0
# the potential is automatically shifted so that U(r=cutoff) = 0.0
# if the potential should not be shifted set shift=0.0
potential = interaction.setPotential(type1=0, type2=0,
potential=espressopp.interaction.LennardJones(
epsilon=epsilon, sigma=sigma, cutoff=r_cutoff, shift=0.0))
########################################################################
# 8. running the equilibration loop #
########################################################################
# add the new interaction to the system
system.addInteraction(interaction)
# since the interaction cut-off changed, the size of the cells that are
# used to speed up verlet list builds should be adjusted accordingly
system.storage.cellAdjust()
# set all integrator timers to zero again (they were increased during warmup)
integrator.resetTimers()
# set integrator time step to zero again
integrator.step = 0
print("starting equilibration ...")
# print initial status information
espressopp.tools.analyse.info(system, integrator)
for step in range(equil_nloops):
# perform equilibration_isteps integration steps
integrator.run(equil_isteps)
# print status information
espressopp.tools.analyse.info(system, integrator)
print("equilibration finished")
########################################################################
# 9. writing configuration to file #
########################################################################
# write folded xyz coordinates and particle velocities into a file
# format of xyz file is:
# first line : number of particles
# second line : box_Lx, box_Ly, box_Lz
# all other lines : ParticleID ParticleType x_pos y_pos z_pos x_vel y_vel z_vel
filename = "lennard_jones_fluid_%0i.xyz" % integrator.step
print("writing final configuration file ...")
espressopp.tools.writexyz(filename, system, velocities = True, unfolded = False)
# also write a PDB file which can be used to visualize configuration with VMD
print("writing pdb file ...")
filename = "lennard_jones_fluid_%0i.pdb" % integrator.step
espressopp.tools.pdbwrite(filename, system, molsize=Npart)
print("finished.")
|
espressopp/espressopp
|
examples/lennard_jones/lennard_jones.py
|
Python
|
gpl-3.0
| 13,603
|
[
"ESPResSo",
"VMD"
] |
141f6093df69eb2bfaabd15932f014a3cd7278cfd3f57a52104b0988615d2607
|
#!/usr/bin/env python
"tune ELM + linear regressor on top"
import pandas as pd
import numpy as np
import math
import csv
import hyperopt
import os
import sys
from time import time
from glob import glob
from math import log
from hyperopt import hp, fmin, tpe
from elm import *
from random_layer import *
from sklearn import pipeline
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error as MSE
from sklearn.preprocessing import StandardScaler as Scaler
def RMSE( y, p ):
return math.sqrt( MSE( y, p ))
###
input_file = 'data/train_num.csv'
try:
output_file = sys.argv[1]
except IndexError:
output_file = 'hyperopt_log_pipeline.csv'
data = pd.read_csv( input_file )
train = data[0:93170]
test = data[93170:]
x_train = train[[ c for c in train.columns if c != 'cpu_01_busy' ]]
x_test = test[[ c for c in train.columns if c != 'cpu_01_busy' ]]
scaler = Scaler()
x_train = scaler.fit_transform( x_train )
x_test = scaler.transform( x_test )
y_train = train[ 'cpu_01_busy' ]
y_test = test[ 'cpu_01_busy' ]
#
max_evals = 50
run_counter = 0
def run_wrapper( params ):
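    # hyperopt objective: evaluate one parameter set, log the trial to the
    # CSV file, and return the RMSE that fmin() minimizes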
global run_counter
global o_f
run_counter += 1
print "run", run_counter
s = time()
rmse = run_test( params )
print
print "RMSE:", rmse
print "elapsed: {}s \n".format( int( round( time() - s )))
writer.writerow( [ rmse ] + list( params ))
o_f.flush()
return rmse
def run_test( params ):
n_hidden, alpha, rbf_width, activation_func, ridge_alpha = params
n_hidden = int( n_hidden )
print "n_hidden:", n_hidden
print "alpha:", alpha
print "rbf_width:", rbf_width
print "activation_func:", activation_func
print "ridge_alpha:", ridge_alpha
rl = RandomLayer( n_hidden = n_hidden, alpha = alpha,
rbf_width = rbf_width, activation_func = activation_func )
ridge = Ridge( alpha = ridge_alpha )
elmr = pipeline.Pipeline( [( 'rl', rl ), ( 'ridge', ridge )] )
elmr.fit( x_train, y_train )
p = elmr.predict( x_test )
rmse = RMSE( y_test, p )
return rmse
###
space = (
hp.qloguniform( 'n_hidden', log( 10 ), log( 1000 ), 1 ),
hp.uniform( 'alpha', 0, 1 ),
hp.loguniform( 'rbf_width', log( 1e-5 ), log( 100 )),
hp.choice( 'activation_func', [ 'tanh', 'sine', 'tribas', 'inv_tribas', 'sigmoid', 'hardlim', 'softlim', 'gaussian', 'multiquadric','inv_multiquadric' ] ),
hp.loguniform( 'ridge_alpha', -15, 5 )
)
###
if __name__ == '__main__':
headers = [ 'rmse', 'n_hidden', 'alpha', 'rbf_width', 'activation_func', 'ridge_alpha' ]
o_f = open( output_file, 'wb' )
writer = csv.writer( o_f )
writer.writerow( headers )
start_time = time()
best = fmin( run_wrapper, space, algo = tpe.suggest, max_evals = max_evals )
end_time = time()
print "Seconds passed:", int( round( end_time - start_time ))
#print "Best run:", optimizer.get_best_run()
print best
#print run_test( hyperopt.space_eval( space, best ))
|
duthchao/kaggle-burn-cpu
|
pipeline_driver.py
|
Python
|
bsd-2-clause
| 2,877
|
[
"Gaussian"
] |
4bbe51a229c6d39daa5705755c26c9ddd02c53a778b5d6270dde616430d4352c
|
#!/usr/bin/env python
import os
import re
import netCDF4 as nc
import numpy as np
import pandas as pd
import pickle
from natsort import natsorted
from collections import OrderedDict
# parallel processing
from joblib import delayed, Parallel
from multiprocessing import cpu_count
def get_value(nc_obj, label):
return nc_obj.variables[label][:].flatten()
def get_dataframe(nc_path):
"""
A quick function to transform a netcdf file into a pandas dataframe that
can be used for analysis and plotting. Attributes are extracted using
in built netCDF4 library functions. Time is arbitrary and needs to be
set by the user.
"""
print("> pickling contents in object at {0}".format(nc_path))
# make a connection to the netCDF file
ncdf_con = nc.Dataset(nc_path, 'r', format="NETCDF4")
# number of rows, equivalent to time-steps
time_len = len(ncdf_con.dimensions['time'])
# extract time information
time_sec = ncdf_con.variables['time']
sec_orig = re.search(r'\d+.*', str(time_sec.units)).group(0)
    # the header values for each measurement; excludes time and space components
nc_allkeys = ncdf_con.variables.keys()
# only want tree and grass outputs
data_values = [key for key in nc_allkeys \
if re.search('(tree)|(grass)', key)]
# create a new dataframe from the netCDF file
nc_dataframe = pd.DataFrame({label: get_value(ncdf_con, label) \
for label in data_values}, \
index=pd.date_range(sec_orig, \
periods=time_len, freq="30min"))
return nc_dataframe
def main():
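    # Pipeline: locate each experiment's netCDF output, convert them to
    # dataframes in parallel, then pickle the lot as one dict keyed by
    # experiment number.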
# Get the number of available cores for multi-proc
num_cores = cpu_count()
# Get the filepaths for each experiment's output ncdf file
nc_paths = natsorted([os.path.join(dp, f) for (dp, dn, fn) in os.walk(DIRPATH) \
for f in fn if re.search("^((?!DS_Store|inputs|tower).)*$", f)])
# Retrieve dataframes of tree and grass productivity from ncdf files
hws_dfs = Parallel(n_jobs=num_cores)(delayed(get_dataframe)(npf) \
for npf in nc_paths)
# pickle the leaf scale outputs (see if it's quicker to load)
hws_dict = {"Exp_{0}".format(i+1): df for (i, df) in enumerate(hws_dfs)}
pickle.dump(hws_dict, open(PKLPATH+"hourly/leaf_dict.pkl", "wb"))
return None
if __name__ == '__main__':
DIRPATH = os.path.expanduser("~/Savanna/Data/HowardSprings_IAV/ncdf/")
PKLPATH = os.path.expanduser("~/Savanna/Data/HowardSprings_IAV/pickled/")
SAVEPATH = os.path.expanduser("~/Savanna/Analysis/figures/IAV/HWS_leaf_behaviour.pdf")
main()
|
rhyswhitley/savanna_iav
|
src/data_preproc/pickleit/pickle_leafoutputs.py
|
Python
|
cc0-1.0
| 2,668
|
[
"NetCDF"
] |
f94e96e76bea444afb1b3d8a142e5a2a3bdd6609ff2375946438dc941a01d375
|
#!/usr/bin/python3
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
import time
import configparser
import os
import errno
import psycopg2
import psycopg2.extras
import re
import traceback
import urllib.request, urllib.error, urllib.parse
import io
import sys
import argparse
import datetime
import shutil
config = configparser.ConfigParser()
if not config.read("muninrc"):
# No config found.
raise ValueError("Expected configuration file muninrc" ", not found.")
useragent = "Munin (Python-urllib/%s); BotNick/%s; Admin/%s" % (
urllib.request.__version__,
config.get("Connection", "nick"),
config.get("Auth", "owner_nick"),
)
DSN = "dbname=%s user=%s" % (
config.get("Database", "dbname"),
config.get("Database", "user"),
)
if config.has_option("Database", "password"):
DSN += " password=%s" % config.get("Database", "password")
if config.has_option("Database", "host"):
DSN += " host=%s" % config.get("Database", "host")
t_start = time.time()
ofile = open("pid.hugin", "w")
ofile.write("%s" % (os.getpid(),))
ofile.close()
def write_to_file(data, out):
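    # Stream the response to disk in 16 KiB chunks, decoding from the
    # dumps' latin-1 encoding.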
with open(out, "w") as f:
size = 16 * 1024
chunk = True
while chunk:
chunk = data.read(size)
if chunk:
f.write(chunk.decode("iso-8859-1"))
def overwrite(from_file, to_file):
try:
os.unlink(to_file)
except OSError:
pass
# Moving a file while it's still open is impossible on Windows.
shutil.copy2(from_file, to_file)
class InvalidTickException(Exception):
pass
class AncientStaleTickException(Exception):
pass
def extract_tick(feed):
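    # The dump header is eight lines long; the tick number sits on the
    # fourth line as "tick: N". The remaining header lines are consumed
    # here so callers can bulk-load the body directly.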
feed.readline()
feed.readline()
feed.readline()
tick_line = feed.readline()
m = re.search(r"tick:\s+(\d+)", tick_line, re.I)
if not m:
raise InvalidTickException("Invalid tick: '%s'" % (tick_line,))
tick = int(m.group(1))
feed.readline()
feed.readline()
feed.readline()
feed.readline()
return tick
while True:
try:
cur_round = config.getint("Planetarion", "current_round")
planetlist = config.get("Url", "planetlist")
galaxylist = config.get("Url", "galaxylist")
alliancelist = config.get("Url", "alliancelist")
userfeedlist = config.get("Url", "userfeed")
planet_file = planetlist.split("/")[-1]
galaxy_file = galaxylist.split("/")[-1]
alliance_file = alliancelist.split("/")[-1]
userfeed_file = userfeedlist.split("/")[-1]
write_dumps = config.getboolean("Dumps", "write")
from_web = False
parser = argparse.ArgumentParser(
description="Planetarion dumps processor for Munin.",
epilog="Note that --planets, --galaxies, --alliances and --userfeed must either be given together, or not at all (in which case the most recent dumps are retrieved from the web)",
)
parser.add_argument(
"-p", "--planets", type=argparse.FileType(mode='r', encoding='latin-1'), metavar="FILE"
)
parser.add_argument(
"-g", "--galaxies", type=argparse.FileType(mode='r', encoding='latin-1'), metavar="FILE"
)
parser.add_argument(
"-a", "--alliances", type=argparse.FileType(mode='r', encoding='latin-1'), metavar="FILE"
)
parser.add_argument(
"-u", "--userfeed", type=argparse.FileType(mode='r', encoding='latin-1'), metavar="FILE"
)
parser.add_argument(
"-r", "--round", type=int, default=cur_round, metavar="NUMBER"
)
args = parser.parse_args()
cur_round = args.round
t1 = time.time()
if args.planets and args.galaxies and args.alliances and args.userfeed:
planets = args.planets
galaxies = args.galaxies
alliances = args.alliances
userfeed = args.userfeed
elif args.planets or args.galaxies or args.alliances or args.userfeed:
print(
"%s: error: The options --planets, --galaxies, --alliance and --userfeed must either be given together or not at all!\n"
% (sys.argv[0])
)
exit(3)
else:
from_web = True
try:
req = urllib.request.Request(planetlist)
req.add_header("User-Agent", useragent)
planets = urllib.request.urlopen(req)
write_to_file(planets, planet_file)
planets = open(planet_file, "r")
except Exception as e:
print("Failed gathering planet listing.")
print(e.__str__())
time.sleep(300)
continue
try:
req = urllib.request.Request(galaxylist)
req.add_header("User-Agent", useragent)
galaxies = urllib.request.urlopen(req)
write_to_file(galaxies, galaxy_file)
galaxies = open(galaxy_file, "r")
except Exception as e:
print("Failed gathering galaxy listing.")
print(e.__str__())
time.sleep(300)
continue
try:
req = urllib.request.Request(alliancelist)
req.add_header("User-Agent", useragent)
alliances = urllib.request.urlopen(req)
write_to_file(alliances, alliance_file)
alliances = open(alliance_file, "r")
except Exception as e:
print("Failed gathering alliance listing.")
print(e.__str__())
time.sleep(300)
continue
try:
req = urllib.request.Request(userfeedlist)
req.add_header("User-Agent", useragent)
userfeed = urllib.request.urlopen(req)
write_to_file(userfeed, userfeed_file)
userfeed = open(userfeed_file, "r")
except Exception as e:
print("Failed gathering user feed.")
print(e.__str__())
time.sleep(300)
continue
try:
planet_tick = extract_tick(planets)
galaxy_tick = extract_tick(galaxies)
alliance_tick = extract_tick(alliances)
userfeed_tick = extract_tick(userfeed)
except InvalidTickException as e:
        print(e)  # Exception has no .message attribute in Python 3
time.sleep(120)
continue
print("Planet dump for tick %s" % (planet_tick,))
print("Galaxy dump for tick %s" % (galaxy_tick,))
print("Alliance dump for tick %s" % (alliance_tick,))
print("User feed dump for tick %s" % (userfeed_tick,))
if not (planet_tick == galaxy_tick == alliance_tick == userfeed_tick):
print("Varying ticks found, sleeping")
print(
"Planet: %s, Galaxy: %s, Alliance: %s, User feed: %s"
% (planet_tick, galaxy_tick, alliance_tick, userfeed_tick)
)
time.sleep(30)
continue
if from_web and write_dumps:
# Store the newly retrieved dump files
dump_dir = config.get("Dumps", "dir")
tick_dir = os.path.join(dump_dir, "r%03d" % cur_round, "%04d" % planet_tick)
try:
os.makedirs(tick_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
overwrite(planet_file, os.path.join(tick_dir, planet_file))
overwrite(galaxy_file, os.path.join(tick_dir, galaxy_file))
overwrite(alliance_file, os.path.join(tick_dir, alliance_file))
overwrite(userfeed_file, os.path.join(tick_dir, userfeed_file))
print("Wrote dump files to disk")
conn = psycopg2.connect(DSN)
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(
"SELECT tick,timestamp FROM updates where round = %s and tick = (select max_tick(%s::smallint))",
(cur_round, cur_round),
)
last_tick_info = cursor.fetchone()
last_tick = -1
if last_tick_info:
last_tick = int(last_tick_info["tick"])
        if planet_tick <= last_tick:
if from_web:
delta = datetime.datetime.now() - last_tick_info["timestamp"]
wait = 60
if delta.days > 1 or delta.seconds >= (6 * 3600):
raise AncientStaleTickException(
"Stale tick was %d days and %d seconds old, has the round ended?"
% (delta.days, delta.seconds)
)
else:
wait = 1 + (abs(3600 - delta.seconds) % 900)
print("Stale ticks found, sleeping %d seconds" % (wait,))
time.sleep(wait)
continue
else:
print(
"Warning: stale ticks found, but dump files were passed on command line, continuing"
)
t2 = time.time() - t1
if from_web:
print("Loaded dumps from webserver in %.3f seconds" % (t2,))
else:
print("Loaded dumps from file in %.3f seconds" % (t2,))
t1 = time.time()
ptmp = "ptmp"
gtmp = "gtmp"
atmp = "atmp"
utmp = "utmp"
query = """
CREATE TEMP TABLE %s (
uid varchar(12) NOT NULL,
x smallint,
y smallint,
z smallint,
planetname varchar(22) NOT NULL,
rulername varchar(32) NOT NULL,
race char(3) CHECK (race in (NULL,'Ter','Cat','Xan','Zik','Etd')),
size integer NOT NULL,
score integer NOT NULL,
value integer NOT NULL,
xp integer NOT NULL,
special varchar(10) NOT NULL
)
""" % (
ptmp,
)
cursor.execute(query)
foo = planets.readlines()[:-1]
cursor.copy_from(io.StringIO("".join(foo)), ptmp, "\t")
query = """
CREATE TEMP TABLE %s (
x smallint,
y smallint,
name varchar(66) NOT NULL,
size int NOT NULL,
score bigint DEFAULT 0,
value bigint NOT NULL,
xp integer NOT NULL
)
""" % (
gtmp,
)
cursor.execute(query)
foo = galaxies.readlines()[:-1]
cursor.copy_from(io.StringIO("".join(foo)), gtmp, "\t", null="")
query = """
CREATE TEMP TABLE %s (
score_rank smallint NOT NULL,
name varchar(22) NOT NULL,
size int NOT NULL,
members smallint NOT NULL,
score bigint NOT NULL,
points bigint NOT NULL,
total_score bigint NOT NULL,
total_value bigint NOT NULL
)
""" % (
atmp,
)
cursor.execute(query)
foo = alliances.readlines()[:-1]
cursor.copy_from(io.StringIO("".join(foo)), atmp, "\t")
query = """
CREATE TEMP TABLE %s (
tick smallint NOT NULL,
type varchar(32) NOT NULL,
text varchar(255) NOT NULL
)
""" % (
utmp,
)
cursor.execute(query)
foo = userfeed.readlines()[:-1]
cursor.copy_from(io.StringIO("".join(foo)), utmp, "\t")
t2 = time.time() - t1
print("Copied dumps in %.3f seconds" % (t2,))
t1 = time.time()
query = "SELECT store_update(%s::smallint,%s::smallint,%s::text,%s::text,%s::text,%s::text)"
cursor.execute(query, (cur_round, planet_tick, ptmp, gtmp, atmp, utmp))
try:
query = "SELECT store_planets(%s::smallint,%s::smallint)"
cursor.execute(query, (cur_round, planet_tick,))
t2 = time.time() - t1
print("Processed and inserted planet dumps in %.3f seconds" % (t2,))
t1 = time.time()
query = "SELECT store_galaxies(%s::smallint,%s::smallint)"
cursor.execute(query, (cur_round, galaxy_tick,))
t2 = time.time() - t1
print("Processed and inserted galaxy dumps in %.3f seconds" % (t2,))
t1 = time.time()
query = "SELECT store_alliances(%s::smallint,%s::smallint)"
cursor.execute(query, (cur_round, alliance_tick,))
t2 = time.time() - t1
print("Processed and inserted alliance dumps in %.3f seconds" % (t2,))
t1 = time.time()
query = "SELECT store_userfeed(%s::smallint)"
cursor.execute(query, (cur_round,))
t2 = time.time() - t1
print("Processed and inserted user feed dumps in %.3f seconds" % (t2,))
t1 = time.time()
planets.close()
galaxies.close()
alliances.close()
userfeed.close()
except psycopg2.IntegrityError:
raise
conn.commit()
break
except AncientStaleTickException as a:
print("Something random went wrong, crashing out and waiting for cron rerun")
print(a.__str__())
traceback.print_exc()
sys.exit(1)
except Exception as e:
print(
"Something random went wrong, sleeping for 15 seconds to hope it improves"
)
print(e.__str__())
traceback.print_exc()
time.sleep(15)
continue
t2 = time.time() - t1
t1 = time.time() - t_start
print("Commit in %.3f seconds" % (t2,))
print("Total time taken: %.3f seconds" % (t1,))
|
munin/munin
|
utils/hugin.py
|
Python
|
gpl-2.0
| 14,435
|
[
"Galaxy"
] |
13175e1b6fed435f206bbd82fef9df0c6a61eefb19db62c2d2da4b1dd95c9330
|
# -*- coding: utf-8 -*-
import sys
sys.path[0:0] = [""]
import bson
import os
import pickle
import unittest
import uuid
import weakref
from datetime import datetime
from bson import DBRef, ObjectId
from tests import fixtures
from tests.fixtures import (PickleEmbedded, PickleTest, PickleSignalsTest,
PickleDyanmicEmbedded, PickleDynamicTest)
from mongoengine import *
from mongoengine.errors import (NotRegistered, InvalidDocumentError,
InvalidQueryError, NotUniqueError,
FieldDoesNotExist, SaveConditionError)
from mongoengine.queryset import NULLIFY, Q
from mongoengine.connection import get_db
from mongoengine.base import get_document
from mongoengine.context_managers import switch_db, query_counter
from mongoengine import signals
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__),
'../fields/mongoengine.png')
__all__ = ("InstanceTest",)
class InstanceTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
self.db = get_db()
class Job(EmbeddedDocument):
name = StringField()
years = IntField()
class Person(Document):
name = StringField()
age = IntField()
job = EmbeddedDocumentField(Job)
non_field = True
meta = {"allow_inheritance": True}
self.Person = Person
self.Job = Job
def tearDown(self):
for collection in self.db.collection_names():
if 'system.' in collection:
continue
self.db.drop_collection(collection)
def assertDbEqual(self, docs):
self.assertEqual(
list(self.Person._get_collection().find().sort("id")),
sorted(docs, key=lambda doc: doc["_id"]))
def assertHasInstance(self, field, instance):
self.assertTrue(hasattr(field, "_instance"))
self.assertTrue(field._instance is not None)
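        # _instance may be held through a weakref proxy, which is not the
        # referent itself, so compare via __eq__ rather than identity.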
if isinstance(field._instance, weakref.ProxyType):
self.assertTrue(field._instance.__eq__(instance))
else:
self.assertEqual(field._instance, instance)
def test_capped_collection(self):
"""Ensure that capped collections work properly.
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
'max_size': 4096,
}
Log.drop_collection()
# Ensure that the collection handles up to its maximum
for _ in range(10):
Log().save()
self.assertEqual(Log.objects.count(), 10)
# Check that extra documents don't increase the size
Log().save()
self.assertEqual(Log.objects.count(), 10)
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertEqual(options['max'], 10)
self.assertEqual(options['size'], 4096)
# Check that the document cannot be redefined with different options
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 11,
}
# Create the collection by accessing Document.objects
Log.objects
self.assertRaises(InvalidCollectionError, recreate_log_document)
Log.drop_collection()
def test_capped_collection_default(self):
"""Ensure that capped collections defaults work properly.
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
}
Log.drop_collection()
# Create a doc to create the collection
Log().save()
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertEqual(options['max'], 10)
self.assertEqual(options['size'], 10 * 2**20)
# Check that the document with default value can be recreated
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
}
# Create the collection by accessing Document.objects
Log.objects
recreate_log_document()
Log.drop_collection()
def test_capped_collection_no_max_size_problems(self):
"""Ensure that capped collections with odd max_size work properly.
        MongoDB rounds max_size up to the next multiple of 256; recreating
        a doc with the same spec failed in mongoengine <0.10
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_size': 10000,
}
Log.drop_collection()
# Create a doc to create the collection
Log().save()
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertTrue(options['size'] >= 10000)
# Check that the document with odd max_size value can be recreated
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_size': 10000,
}
# Create the collection by accessing Document.objects
Log.objects
recreate_log_document()
Log.drop_collection()
def test_repr(self):
"""Ensure that unicode representation works
"""
class Article(Document):
title = StringField()
def __unicode__(self):
return self.title
doc = Article(title=u'привет мир')
self.assertEqual('<Article: привет мир>', repr(doc))
def test_repr_none(self):
"""Ensure None values handled correctly
"""
class Article(Document):
title = StringField()
def __str__(self):
return None
doc = Article(title=u'привет мир')
self.assertEqual('<Article: None>', repr(doc))
def test_queryset_resurrects_dropped_collection(self):
self.Person.drop_collection()
self.assertEqual([], list(self.Person.objects()))
class Actor(self.Person):
pass
        # Ensure this works correctly with inherited classes
Actor.objects()
self.Person.drop_collection()
self.assertEqual([], list(Actor.objects()))
def test_polymorphic_references(self):
"""Ensure that the correct subclasses are returned from a query when
using references / generic references
"""
class Animal(Document):
meta = {'allow_inheritance': True}
class Fish(Animal):
pass
class Mammal(Animal):
pass
class Dog(Mammal):
pass
class Human(Mammal):
pass
class Zoo(Document):
animals = ListField(ReferenceField(Animal))
Zoo.drop_collection()
Animal.drop_collection()
Animal().save()
Fish().save()
Mammal().save()
Dog().save()
Human().save()
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
Zoo.drop_collection()
class Zoo(Document):
animals = ListField(GenericReferenceField(Animal))
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
Zoo.drop_collection()
Animal.drop_collection()
def test_reference_inheritance(self):
class Stats(Document):
created = DateTimeField(default=datetime.now)
meta = {'allow_inheritance': False}
class CompareStats(Document):
generated = DateTimeField(default=datetime.now)
stats = ListField(ReferenceField(Stats))
Stats.drop_collection()
CompareStats.drop_collection()
list_stats = []
for i in xrange(10):
s = Stats()
s.save()
list_stats.append(s)
cmp_stats = CompareStats(stats=list_stats)
cmp_stats.save()
self.assertEqual(list_stats, CompareStats.objects.first().stats)
def test_db_field_load(self):
"""Ensure we load data correctly
"""
class Person(Document):
name = StringField(required=True)
_rank = StringField(required=False, db_field="rank")
@property
def rank(self):
return self._rank or "Private"
Person.drop_collection()
Person(name="Jack", _rank="Corporal").save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_db_embedded_doc_field_load(self):
"""Ensure we load embedded document data correctly
"""
class Rank(EmbeddedDocument):
title = StringField(required=True)
class Person(Document):
name = StringField(required=True)
rank_ = EmbeddedDocumentField(Rank,
required=False,
db_field='rank')
@property
def rank(self):
if self.rank_ is None:
return "Private"
return self.rank_.title
Person.drop_collection()
Person(name="Jack", rank_=Rank(title="Corporal")).save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_custom_id_field(self):
"""Ensure that documents may be created with custom primary keys.
"""
class User(Document):
username = StringField(primary_key=True)
name = StringField()
meta = {'allow_inheritance': True}
User.drop_collection()
self.assertEqual(User._fields['username'].db_field, '_id')
self.assertEqual(User._meta['id_field'], 'username')
def create_invalid_user():
User(name='test').save() # no primary key field
self.assertRaises(ValidationError, create_invalid_user)
def define_invalid_user():
class EmailUser(User):
email = StringField(primary_key=True)
self.assertRaises(ValueError, define_invalid_user)
class EmailUser(User):
email = StringField()
user = User(username='test', name='test user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'test')
self.assertEqual(user_obj.pk, 'test')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'test')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
user = User(pk='mongo', name='mongo user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'mongo')
self.assertEqual(user_obj.pk, 'mongo')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'mongo')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
def test_document_not_registered(self):
class Place(Document):
name = StringField()
meta = {'allow_inheritance': True}
class NicePlace(Place):
pass
Place.drop_collection()
Place(name="London").save()
NicePlace(name="Buckingham Palace").save()
# Mimic Place and NicePlace definitions being in a different file
# and the NicePlace model not being imported in at query time.
from mongoengine.base import _document_registry
del(_document_registry['Place.NicePlace'])
def query_without_importing_nice_place():
print Place.objects.all()
self.assertRaises(NotRegistered, query_without_importing_nice_place)
def test_document_registry_regressions(self):
class Location(Document):
name = StringField()
meta = {'allow_inheritance': True}
class Area(Location):
location = ReferenceField('Location', dbref=True)
Location.drop_collection()
self.assertEqual(Area, get_document("Area"))
self.assertEqual(Area, get_document("Location.Area"))
def test_creation(self):
"""Ensure that document may be created using keyword arguments.
"""
person = self.Person(name="Test User", age=30)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 30)
def test_to_dbref(self):
"""Ensure that you can get a dbref of a document"""
person = self.Person(name="Test User", age=30)
self.assertRaises(OperationError, person.to_dbref)
person.save()
person.to_dbref()
def test_reload(self):
"""Ensure that attributes may be reloaded.
"""
person = self.Person(name="Test User", age=20)
person.save()
person_obj = self.Person.objects.first()
person_obj.name = "Mr Test User"
person_obj.age = 21
person_obj.save()
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 20)
person.reload('age')
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 21)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
def test_reload_sharded(self):
class Animal(Document):
superphylum = StringField()
meta = {'shard_key': ('superphylum',)}
Animal.drop_collection()
doc = Animal(superphylum='Deuterostomia')
doc.save()
doc.reload()
Animal.drop_collection()
def test_reload_sharded_nested(self):
class SuperPhylum(EmbeddedDocument):
name = StringField()
class Animal(Document):
superphylum = EmbeddedDocumentField(SuperPhylum)
meta = {'shard_key': ('superphylum.name',)}
Animal.drop_collection()
doc = Animal(superphylum=SuperPhylum(name='Deuterostomia'))
doc.save()
doc.reload()
Animal.drop_collection()
def test_reload_referencing(self):
"""Ensures reloading updates weakrefs correctly
"""
class Embedded(EmbeddedDocument):
dict_field = DictField()
list_field = ListField()
class Doc(Document):
dict_field = DictField()
list_field = ListField()
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc()
doc.dict_field = {'hello': 'world'}
doc.list_field = ['1', 2, {'hello': 'world'}]
embedded_1 = Embedded()
embedded_1.dict_field = {'hello': 'world'}
embedded_1.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field = embedded_1
doc.save()
doc = doc.reload(10)
doc.list_field.append(1)
doc.dict_field['woot'] = "woot"
doc.embedded_field.list_field.append(1)
doc.embedded_field.dict_field['woot'] = "woot"
self.assertEqual(doc._get_changed_fields(), [
'list_field', 'dict_field.woot', 'embedded_field.list_field',
'embedded_field.dict_field.woot'])
doc.save()
self.assertEqual(len(doc.list_field), 4)
doc = doc.reload(10)
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(len(doc.list_field), 4)
self.assertEqual(len(doc.dict_field), 2)
self.assertEqual(len(doc.embedded_field.list_field), 4)
self.assertEqual(len(doc.embedded_field.dict_field), 2)
doc.list_field.append(1)
doc.save()
doc.dict_field['extra'] = 1
doc = doc.reload(10, 'list_field')
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(len(doc.list_field), 5)
self.assertEqual(len(doc.dict_field), 3)
self.assertEqual(len(doc.embedded_field.list_field), 4)
self.assertEqual(len(doc.embedded_field.dict_field), 2)
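# Note on the reload() calls above: in this codebase reload() accepts a
# maximum dereference depth as its first positional argument (reload(10)),
# and optional field names to reload selectively. reload(10, 'list_field')
# refreshes only 'list_field' from the database, which is why the locally
# added 'extra' key in dict_field survives the partial reload.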
def test_reload_doesnt_exist(self):
class Foo(Document):
pass
f = Foo()
try:
f.reload()
except Foo.DoesNotExist:
pass
except Exception:
self.fail("Threw wrong exception")
f.save()
f.delete()
try:
f.reload()
except Foo.DoesNotExist:
pass
except Exception:
self.fail("Threw wrong exception")
def test_reload_of_non_strict_with_special_field_name(self):
"""Ensures reloading works for documents with meta strict == False
"""
class Post(Document):
meta = {
'strict': False
}
title = StringField()
items = ListField()
Post.drop_collection()
Post._get_collection().insert({
"title": "Items eclipse",
"items": ["more lorem", "even more ipsum"]
})
post = Post.objects.first()
post.reload()
self.assertEqual(post.title, "Items eclipse")
self.assertEqual(post.items, ["more lorem", "even more ipsum"])
def test_dictionary_access(self):
"""Ensure that dictionary-style field access works properly.
"""
person = self.Person(name='Test User', age=30, job=self.Job())
self.assertEqual(person['name'], 'Test User')
self.assertRaises(KeyError, person.__getitem__, 'salary')
self.assertRaises(KeyError, person.__setitem__, 'salary', 50)
person['name'] = 'Another User'
self.assertEqual(person['name'], 'Another User')
# Length = length(assigned fields + id)
self.assertEqual(len(person), 5)
self.assertTrue('age' in person)
person.age = None
self.assertFalse('age' in person)
self.assertFalse('nationality' in person)
def test_embedded_document_to_mongo(self):
class Person(EmbeddedDocument):
name = StringField()
age = IntField()
meta = {"allow_inheritance": True}
class Employee(Person):
salary = IntField()
self.assertEqual(Person(name="Bob", age=35).to_mongo().keys(),
['_cls', 'name', 'age'])
self.assertEqual(
Employee(name="Bob", age=35, salary=0).to_mongo().keys(),
['_cls', 'name', 'age', 'salary'])
def test_embedded_document_to_mongo_id(self):
class SubDoc(EmbeddedDocument):
id = StringField(required=True)
sub_doc = SubDoc(id="abc")
self.assertEqual(sub_doc.to_mongo().keys(), ['id'])
def test_embedded_document(self):
"""Ensure that embedded documents are set up correctly.
"""
class Comment(EmbeddedDocument):
content = StringField()
self.assertTrue('content' in Comment._fields)
self.assertFalse('id' in Comment._fields)
def test_embedded_document_instance(self):
"""Ensure that embedded documents can reference parent instance
"""
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc(embedded_field=Embedded(string="Hi"))
self.assertHasInstance(doc.embedded_field, doc)
doc.save()
doc = Doc.objects.get()
self.assertHasInstance(doc.embedded_field, doc)
def test_embedded_document_complex_instance(self):
"""Ensure that embedded documents in complex fields can reference
parent instance"""
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
embedded_field = ListField(EmbeddedDocumentField(Embedded))
Doc.drop_collection()
doc = Doc(embedded_field=[Embedded(string="Hi")])
self.assertHasInstance(doc.embedded_field[0], doc)
doc.save()
doc = Doc.objects.get()
self.assertHasInstance(doc.embedded_field[0], doc)
def test_embedded_document_complex_instance_no_use_db_field(self):
"""Ensure that use_db_field is propagated to list of Emb Docs
"""
class Embedded(EmbeddedDocument):
string = StringField(db_field='s')
class Doc(Document):
embedded_field = ListField(EmbeddedDocumentField(Embedded))
d = Doc(embedded_field=[Embedded(string="Hi")]).to_mongo(
use_db_field=False).to_dict()
self.assertEqual(d['embedded_field'], [{'string': 'Hi'}])
def test_instance_is_set_on_setattr(self):
class Email(EmbeddedDocument):
email = EmailField()
class Account(Document):
email = EmbeddedDocumentField(Email)
Account.drop_collection()
acc = Account()
acc.email = Email(email='test@example.com')
self.assertHasInstance(acc._data["email"], acc)
acc.save()
acc1 = Account.objects.first()
self.assertHasInstance(acc1._data["email"], acc1)
def test_instance_is_set_on_setattr_on_embedded_document_list(self):
class Email(EmbeddedDocument):
email = EmailField()
class Account(Document):
emails = EmbeddedDocumentListField(Email)
Account.drop_collection()
acc = Account()
acc.emails = [Email(email='test@example.com')]
self.assertHasInstance(acc._data["emails"][0], acc)
acc.save()
acc1 = Account.objects.first()
self.assertHasInstance(acc1._data["emails"][0], acc1)
def test_document_clean(self):
class TestDocument(Document):
status = StringField()
pub_date = DateTimeField()
def clean(self):
if self.status == 'draft' and self.pub_date is not None:
msg = 'Draft entries may not have a publication date.'
raise ValidationError(msg)
# Set the pub_date for published items if not set.
if self.status == 'published' and self.pub_date is None:
self.pub_date = datetime.now()
TestDocument.drop_collection()
t = TestDocument(status="draft", pub_date=datetime.now())
try:
t.save()
except ValidationError, e:
expect_msg = "Draft entries may not have a publication date."
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'__all__': expect_msg})
t = TestDocument(status="published")
t.save(clean=False)
self.assertEqual(t.pub_date, None)
t = TestDocument(status="published")
t.save(clean=True)
self.assertEqual(type(t.pub_date), datetime)
def test_document_embedded_clean(self):
class TestEmbeddedDocument(EmbeddedDocument):
x = IntField(required=True)
y = IntField(required=True)
z = IntField(required=True)
meta = {'allow_inheritance': False}
def clean(self):
if self.z:
if self.z != self.x + self.y:
raise ValidationError('Value of z != x + y')
else:
self.z = self.x + self.y
class TestDocument(Document):
doc = EmbeddedDocumentField(TestEmbeddedDocument)
status = StringField()
TestDocument.drop_collection()
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25, z=15))
try:
t.save()
except ValidationError, e:
expect_msg = "Value of z != x + y"
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'doc': {'__all__': expect_msg}})
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25)).save()
self.assertEqual(t.doc.z, 35)
# Assert that save(clean=False) does not raise
t = TestDocument(doc=TestEmbeddedDocument(x=15, y=35, z=5))
t.save(clean=False)
def test_modify_empty(self):
doc = self.Person(name="bob", age=10).save()
self.assertRaises(
InvalidDocumentError, lambda: self.Person().modify(set__age=10))
self.assertDbEqual([dict(doc.to_mongo())])
def test_modify_invalid_query(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(name="jim", age=20).save()
docs = [dict(doc1.to_mongo()), dict(doc2.to_mongo())]
self.assertRaises(
InvalidQueryError,
lambda: doc1.modify(dict(id=doc2.id), set__value=20))
self.assertDbEqual(docs)
def test_modify_match_another_document(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(name="jim", age=20).save()
docs = [dict(doc1.to_mongo()), dict(doc2.to_mongo())]
assert not doc1.modify(dict(name=doc2.name), set__age=100)
self.assertDbEqual(docs)
def test_modify_not_exists(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(id=ObjectId(), name="jim", age=20)
docs = [dict(doc1.to_mongo())]
assert not doc2.modify(dict(name=doc2.name), set__age=100)
self.assertDbEqual(docs)
def test_modify_update(self):
other_doc = self.Person(name="bob", age=10).save()
doc = self.Person(
name="jim", age=20, job=self.Job(name="10gen", years=3)).save()
doc_copy = doc._from_son(doc.to_mongo())
# these changes must go away
doc.name = "liza"
doc.job.name = "Google"
doc.job.years = 3
assert doc.modify(
set__age=21, set__job__name="MongoDB", unset__job__years=True)
doc_copy.age = 21
doc_copy.job.name = "MongoDB"
del doc_copy.job.years
assert doc.to_json() == doc_copy.to_json()
assert doc._get_changed_fields() == []
self.assertDbEqual([dict(other_doc.to_mongo()), dict(doc.to_mongo())])
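# modify() performs one atomic operation: the document's own pk is merged
# into the optional extra query, so the call above behaves roughly like
# this raw operation (illustrative sketch only):
#
#     db.person.find_and_modify(
#         query={"_id": doc.pk},
#         update={"$set": {"age": 21, "job.name": "MongoDB"},
#                 "$unset": {"job.years": 1}})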
def test_save(self):
"""Ensure that a document may be saved in the database.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(person_obj['name'], 'Test User')
self.assertEqual(person_obj['age'], 30)
self.assertEqual(person_obj['_id'], person.id)
# Test skipping validation on save
class Recipient(Document):
email = EmailField(required=True)
recipient = Recipient(email='root@localhost')
self.assertRaises(ValidationError, recipient.save)
try:
recipient.save(validate=False)
except ValidationError:
self.fail()
def test_save_to_a_value_that_equates_to_false(self):
class Thing(EmbeddedDocument):
count = IntField()
class User(Document):
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
user = User(thing=Thing(count=1))
user.save()
user.reload()
user.thing.count = 0
user.save()
user.reload()
self.assertEqual(user.thing.count, 0)
def test_save_max_recursion_not_hit(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
friend = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p1.friend = p2
p1.save()
# Confirm we can save and that it resets the changed fields without
# hitting a max recursion error
p0 = Person.objects.first()
p0.name = 'wpjunior'
p0.save()
def test_save_max_recursion_not_hit_with_file_field(self):
class Foo(Document):
name = StringField()
picture = FileField()
bar = ReferenceField('self')
Foo.drop_collection()
a = Foo(name='hello').save()
a.bar = a
with open(TEST_IMAGE_PATH, 'rb') as test_image:
a.picture = test_image
a.save()
# Confirm we can save and that it resets the changed fields without
# hitting a max recursion error
b = Foo.objects.with_id(a.id)
b.name = 'world'
b.save()
self.assertEqual(b.picture, b.bar.picture)
self.assertEqual(b.bar.picture, b.bar.bar.picture)
def test_save_cascades(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_kwargs(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p1.name = "Daddy Wilson"
p2.save(force_insert=True, cascade_kwargs={"force_insert": False})
p1.reload()
p2.reload()
self.assertEqual(p1.name, p2.parent.name)
def test_save_cascade_meta_false(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
meta = {'cascade': False}
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_meta_true(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
meta = {'cascade': False}
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save(cascade=True)
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
def test_save_cascades_generically(self):
class Person(Document):
name = StringField()
parent = GenericReferenceField()
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_atomicity_condition(self):
class Widget(Document):
toggle = BooleanField(default=False)
count = IntField(default=0)
save_id = UUIDField()
def flip(widget):
widget.toggle = not widget.toggle
widget.count += 1
def UUID(i):
return uuid.UUID(int=i)
Widget.drop_collection()
w1 = Widget(toggle=False, save_id=UUID(1))
# ignore save_condition on new record creation
w1.save(save_condition={'save_id': UUID(42)})
w1.reload()
self.assertFalse(w1.toggle)
self.assertEqual(w1.save_id, UUID(1))
self.assertEqual(w1.count, 0)
# a mismatched save_condition prevents the save and raises an exception
flip(w1)
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 1)
self.assertRaises(SaveConditionError,
w1.save, save_condition={'save_id': UUID(42)})
w1.reload()
self.assertFalse(w1.toggle)
self.assertEqual(w1.count, 0)
# matched save_condition allows save
flip(w1)
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 1)
w1.save(save_condition={'save_id': UUID(1)})
w1.reload()
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 1)
# save_condition can be used to ensure atomic read-and-update cycles,
# i.e., to prevent interleaved reads and writes from separate contexts
w2 = Widget.objects.get()
self.assertEqual(w1, w2)
old_id = w1.save_id
flip(w1)
w1.save_id = UUID(2)
w1.save(save_condition={'save_id': old_id})
w1.reload()
self.assertFalse(w1.toggle)
self.assertEqual(w1.count, 2)
flip(w2)
flip(w2)
self.assertRaises(SaveConditionError,
w2.save, save_condition={'save_id': old_id})
w2.reload()
self.assertFalse(w2.toggle)
self.assertEqual(w2.count, 2)
# save_condition uses mongoengine-style operator syntax
flip(w1)
w1.save(save_condition={'count__lt': w1.count})
w1.reload()
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 3)
flip(w1)
self.assertRaises(SaveConditionError,
w1.save, save_condition={'count__gte': w1.count})
w1.reload()
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 3)
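# A typical optimistic-concurrency pattern built on save_condition
# (sketch only, assuming the Widget document above):
#
#     w = Widget.objects.get()
#     old_id = w.save_id          # remember the version we read
#     w.count += 1
#     w.save_id = uuid.uuid4()    # bump the version
#     try:
#         w.save(save_condition={'save_id': old_id})
#     except SaveConditionError:
#         pass  # someone else wrote first; re-read and retry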
def test_update(self):
"""Ensure that an existing document is updated instead of be
overwritten."""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Create same person object, with same id, without age
same_person = self.Person(name='Test')
same_person.id = person.id
same_person.save()
# Confirm only one object
self.assertEqual(self.Person.objects.count(), 1)
# reload
person.reload()
same_person.reload()
# Confirm the same
self.assertEqual(person, same_person)
self.assertEqual(person.name, same_person.name)
self.assertEqual(person.age, same_person.age)
# Confirm the saved values
self.assertEqual(person.name, 'Test')
self.assertEqual(person.age, 30)
# Test that only() updates just the included fields
person = self.Person.objects.only('name').get()
person.name = 'User'
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 30)
# Test that exclude() updates just the fields that were set
person = self.Person.objects.exclude('name').get()
person.age = 21
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
# Test that only()/exclude() documents can still set fields outside the projection
person = self.Person.objects.only('name').get()
person.name = 'Test'
person.age = 30
person.save()
person.reload()
self.assertEqual(person.name, 'Test')
self.assertEqual(person.age, 30)
# Test that exclude() updates just the fields that were set
person = self.Person.objects.exclude('name').get()
person.name = 'User'
person.age = 21
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
# Confirm that saving removes non-required fields set to None
person = self.Person.objects.exclude('name').get()
person.age = None
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, None)
person = self.Person.objects.get()
person.name = None
person.age = None
person.save()
person.reload()
self.assertEqual(person.name, None)
self.assertEqual(person.age, None)
def test_inserts_if_you_set_the_pk(self):
p1 = self.Person(name='p1', id=bson.ObjectId()).save()
p2 = self.Person(name='p2')
p2.id = bson.ObjectId()
p2.save()
self.assertEqual(2, self.Person.objects.count())
def test_can_save_if_not_included(self):
class EmbeddedDoc(EmbeddedDocument):
pass
class Simple(Document):
pass
class Doc(Document):
string_field = StringField(default='1')
int_field = IntField(default=1)
float_field = FloatField(default=1.1)
boolean_field = BooleanField(default=True)
datetime_field = DateTimeField(default=datetime.now)
embedded_document_field = EmbeddedDocumentField(
EmbeddedDoc, default=lambda: EmbeddedDoc())
list_field = ListField(default=lambda: [1, 2, 3])
dict_field = DictField(default=lambda: {"hello": "world"})
objectid_field = ObjectIdField(default=bson.ObjectId)
reference_field = ReferenceField(Simple, default=lambda:
Simple().save())
map_field = MapField(IntField(), default=lambda: {"simple": 1})
decimal_field = DecimalField(default=1.0)
complex_datetime_field = ComplexDateTimeField(default=datetime.now)
url_field = URLField(default="http://mongoengine.org")
dynamic_field = DynamicField(default=1)
generic_reference_field = GenericReferenceField(
default=lambda: Simple().save())
sorted_list_field = SortedListField(IntField(),
default=lambda: [1, 2, 3])
email_field = EmailField(default="ross@example.com")
geo_point_field = GeoPointField(default=lambda: [1, 2])
sequence_field = SequenceField()
uuid_field = UUIDField(default=uuid.uuid4)
generic_embedded_document_field = GenericEmbeddedDocumentField(
default=lambda: EmbeddedDoc())
Simple.drop_collection()
Doc.drop_collection()
Doc().save()
my_doc = Doc.objects.only("string_field").first()
my_doc.string_field = "string"
my_doc.save()
my_doc = Doc.objects.get(string_field="string")
self.assertEqual(my_doc.string_field, "string")
self.assertEqual(my_doc.int_field, 1)
def test_document_update(self):
def update_not_saved_raises():
person = self.Person(name='dcrosta')
person.update(set__name='Dan Crosta')
self.assertRaises(OperationError, update_not_saved_raises)
author = self.Person(name='dcrosta')
author.save()
author.update(set__name='Dan Crosta')
author.reload()
p1 = self.Person.objects.first()
self.assertEqual(p1.name, author.name)
def update_no_value_raises():
person = self.Person.objects.first()
person.update()
self.assertRaises(OperationError, update_no_value_raises)
def update_no_op_should_default_to_set():
person = self.Person.objects.first()
person.update(name="Dan")
person.reload()
return person.name
self.assertEqual("Dan", update_no_op_should_default_to_set())
def test_update_unique_field(self):
class Doc(Document):
name = StringField(unique=True)
doc1 = Doc(name="first").save()
doc2 = Doc(name="second").save()
self.assertRaises(NotUniqueError, lambda:
doc2.update(set__name=doc1.name))
def test_embedded_update(self):
"""
Test update on `EmbeddedDocumentField` fields
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.objects.first()
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_embedded_update_db_field(self):
"""
Test update on `EmbeddedDocumentField` fields when db_field is other
than default.
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
db_field="page_log_message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.objects.first()
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_save_only_changed_fields(self):
"""Ensure save only sets / unsets changed fields
"""
class User(self.Person):
active = BooleanField(default=True)
User.drop_collection()
# Create person object and save it to the database
user = User(name='Test User', age=30, active=True)
user.save()
user.reload()
# Simulate a race condition
same_person = self.Person.objects.get()
same_person.active = False
user.age = 21
user.save()
same_person.name = 'User'
same_person.save()
person = self.Person.objects.get()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
self.assertEqual(person.active, False)
def test_query_count_when_saving(self):
"""Ensure references don't cause extra fetches when saving"""
class Organization(Document):
name = StringField()
class User(Document):
name = StringField()
orgs = ListField(ReferenceField('Organization'))
class Feed(Document):
name = StringField()
class UserSubscription(Document):
name = StringField()
user = ReferenceField(User)
feed = ReferenceField(Feed)
Organization.drop_collection()
User.drop_collection()
Feed.drop_collection()
UserSubscription.drop_collection()
o1 = Organization(name="o1").save()
o2 = Organization(name="o2").save()
u1 = User(name="Ross", orgs=[o1, o2]).save()
f1 = Feed(name="MongoEngine").save()
sub = UserSubscription(user=u1, feed=f1).save()
user = User.objects.first()
# Even if stored as ObjectIds internally, mongoengine uses DBRefs
# as ObjectIds aren't automatically dereferenced
self.assertTrue(isinstance(user._data['orgs'][0], DBRef))
self.assertTrue(isinstance(user.orgs[0], Organization))
self.assertTrue(isinstance(user._data['orgs'][0], Organization))
# Changing a value
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
self.assertEqual(q, 1)
sub.name = "Test Sub"
sub.save()
self.assertEqual(q, 2)
# Changing a value that will cascade
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
self.assertEqual(q, 1)
sub.user.name = "Test"
self.assertEqual(q, 2)
sub.save(cascade=True)
self.assertEqual(q, 3)
# Changing a value and one that will cascade
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
sub.name = "Test Sub 2"
self.assertEqual(q, 1)
sub.user.name = "Test 2"
self.assertEqual(q, 2)
sub.save(cascade=True)
self.assertEqual(q, 4) # One for the UserSub and one for the User
# Saving with just the refs
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription(user=u1.pk, feed=f1.pk)
self.assertEqual(q, 0)
sub.save()
self.assertEqual(q, 1)
# Saving with just the refs on a ListField
with query_counter() as q:
self.assertEqual(q, 0)
User(name="Bob", orgs=[o1.pk, o2.pk]).save()
self.assertEqual(q, 1)
# Saving new objects
with query_counter() as q:
self.assertEqual(q, 0)
user = User.objects.first()
self.assertEqual(q, 1)
feed = Feed.objects.first()
self.assertEqual(q, 2)
sub = UserSubscription(user=user, feed=feed)
self.assertEqual(q, 2) # Check no change
sub.save()
self.assertEqual(q, 3)
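# Takeaway (sketch): passing primary keys instead of documents, e.g.
# UserSubscription(user=u1.pk, feed=f1.pk).save(), skips dereferencing
# entirely, so constructing and saving costs a single write query.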
def test_set_unset_one_operation(self):
"""Ensure that $set and $unset actions are performed in the same
operation.
"""
class FooBar(Document):
foo = StringField(default=None)
bar = StringField(default=None)
FooBar.drop_collection()
# write an entity with a single prop
foo = FooBar(foo='foo').save()
self.assertEqual(foo.foo, 'foo')
del foo.foo
foo.bar = 'bar'
with query_counter() as q:
self.assertEqual(0, q)
foo.save()
self.assertEqual(1, q)
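# The save above should translate into one update document combining
# both operators, roughly (illustrative sketch):
#
#     {"$set": {"bar": "bar"}, "$unset": {"foo": 1}}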
def test_save_only_changed_fields_recursive(self):
"""Ensure save only sets / unsets changed fields
"""
class Comment(EmbeddedDocument):
published = BooleanField(default=True)
class User(self.Person):
comments_dict = DictField()
comments = ListField(EmbeddedDocumentField(Comment))
active = BooleanField(default=True)
User.drop_collection()
# Create person object and save it to the database
person = User(name='Test User', age=30, active=True)
person.comments.append(Comment())
person.save()
person.reload()
person = self.Person.objects.get()
self.assertTrue(person.comments[0].published)
person.comments[0].published = False
person.save()
person = self.Person.objects.get()
self.assertFalse(person.comments[0].published)
# Simple dict with an embedded document as its value
person.comments_dict['first_post'] = Comment()
person.save()
person = self.Person.objects.get()
self.assertTrue(person.comments_dict['first_post'].published)
person.comments_dict['first_post'].published = False
person.save()
person = self.Person.objects.get()
self.assertFalse(person.comments_dict['first_post'].published)
def test_delete(self):
"""Ensure that document may be deleted using the delete method.
"""
person = self.Person(name="Test User", age=30)
person.save()
self.assertEqual(self.Person.objects.count(), 1)
person.delete()
self.assertEqual(self.Person.objects.count(), 0)
def test_save_custom_id(self):
"""Ensure that a document may be saved with a custom _id.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
id='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_custom_pk(self):
"""
Ensure that a document may be saved with a custom _id using pk alias.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
pk='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_list(self):
"""Ensure that a list field may be properly saved.
"""
class Comment(EmbeddedDocument):
content = StringField()
class BlogPost(Document):
content = StringField()
comments = ListField(EmbeddedDocumentField(Comment))
tags = ListField(StringField())
BlogPost.drop_collection()
post = BlogPost(content='Went for a walk today...')
post.tags = tags = ['fun', 'leisure']
comments = [Comment(content='Good for you'), Comment(content='Yay.')]
post.comments = comments
post.save()
collection = self.db[BlogPost._get_collection_name()]
post_obj = collection.find_one()
self.assertEqual(post_obj['tags'], tags)
for comment_obj, comment in zip(post_obj['comments'], comments):
self.assertEqual(comment_obj['content'], comment['content'])
BlogPost.drop_collection()
def test_list_search_by_embedded(self):
class User(Document):
username = StringField(required=True)
meta = {'allow_inheritance': False}
class Comment(EmbeddedDocument):
comment = StringField()
user = ReferenceField(User,
required=True)
meta = {'allow_inheritance': False}
class Page(Document):
comments = ListField(EmbeddedDocumentField(Comment))
meta = {'allow_inheritance': False,
'indexes': [
{'fields': ['comments.user']}
]}
User.drop_collection()
Page.drop_collection()
u1 = User(username="wilson")
u1.save()
u2 = User(username="rozza")
u2.save()
u3 = User(username="hmarr")
u3.save()
p1 = Page(comments=[Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world"),
Comment(user=u3, comment="Ping Pong"),
Comment(user=u1, comment="I like a beer")])
p1.save()
p2 = Page(comments=[Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world")])
p2.save()
p3 = Page(comments=[Comment(user=u3, comment="Its very good")])
p3.save()
p4 = Page(comments=[Comment(user=u2, comment="Heavy Metal song")])
p4.save()
self.assertEqual(
[p1, p2],
list(Page.objects.filter(comments__user=u1)))
self.assertEqual(
[p1, p2, p4],
list(Page.objects.filter(comments__user=u2)))
self.assertEqual(
[p1, p3],
list(Page.objects.filter(comments__user=u3)))
def test_save_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
employee_obj = collection.find_one({'name': 'Test Employee'})
self.assertEqual(employee_obj['name'], 'Test Employee')
self.assertEqual(employee_obj['age'], 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(employee_obj['details']['position'], 'Developer')
def test_embedded_update_after_save(self):
"""
Test update of `EmbeddedDocumentField` attached to a newly saved
document.
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_updating_an_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Test updating an embedded document
promoted_employee = Employee.objects.get(name='Test Employee')
promoted_employee.details.position = 'Senior Developer'
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.name, 'Test Employee')
self.assertEqual(promoted_employee.age, 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(
promoted_employee.details.position, 'Senior Developer')
# Test removal
promoted_employee.details = None
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.details, None)
def test_object_mixins(self):
class NameMixin(object):
name = StringField()
class Foo(EmbeddedDocument, NameMixin):
quantity = IntField()
self.assertEqual(['name', 'quantity'], sorted(Foo._fields.keys()))
class Bar(Document, NameMixin):
widgets = StringField()
self.assertEqual(['id', 'name', 'widgets'], sorted(Bar._fields.keys()))
def test_mixin_inheritance(self):
class BaseMixIn(object):
count = IntField()
data = StringField()
class DoubleMixIn(BaseMixIn):
comment = StringField()
class TestDoc(Document, DoubleMixIn):
age = IntField()
TestDoc.drop_collection()
t = TestDoc(count=12, data="test",
comment="great!", age=19)
t.save()
t = TestDoc.objects.first()
self.assertEqual(t.age, 19)
self.assertEqual(t.comment, "great!")
self.assertEqual(t.data, "test")
self.assertEqual(t.count, 12)
def test_save_reference(self):
"""Ensure that a document reference field may be saved in the database.
"""
class BlogPost(Document):
meta = {'collection': 'blogpost_1'}
content = StringField()
author = ReferenceField(self.Person)
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV today... how exciting.')
# Should only reference author when saving
post.author = author
post.save()
post_obj = BlogPost.objects.first()
# Test laziness
self.assertTrue(isinstance(post_obj._data['author'],
bson.DBRef))
self.assertTrue(isinstance(post_obj.author, self.Person))
self.assertEqual(post_obj.author.name, 'Test User')
# Ensure that the dereferenced object may be changed and saved
post_obj.author.age = 25
post_obj.author.save()
author = list(self.Person.objects(name='Test User'))[-1]
self.assertEqual(author.age, 25)
BlogPost.drop_collection()
def test_duplicate_db_fields_raise_invalid_document_error(self):
"""Ensure a InvalidDocumentError is thrown if duplicate fields
declare the same db_field"""
def throw_invalid_document_error():
class Foo(Document):
name = StringField()
name2 = StringField(db_field='name')
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_invalid_son(self):
"""Raise an error if loading invalid data"""
class Occurrence(EmbeddedDocument):
number = IntField()
class Word(Document):
stem = StringField()
count = IntField(default=1)
forms = ListField(StringField(), default=list)
occurs = ListField(EmbeddedDocumentField(Occurrence), default=list)
def raise_invalid_document():
Word._from_son({'stem': [1, 2, 3], 'forms': 1, 'count': 'one',
'occurs': {"hello": None}})
self.assertRaises(InvalidDocumentError, raise_invalid_document)
def test_reverse_delete_rule_cascade_and_nullify(self):
"""Ensure that a referenced document is also deleted upon deletion.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
reviewer = self.Person(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.reviewer = reviewer
post.save()
reviewer.delete()
# No effect on the BlogPost
self.assertEqual(BlogPost.objects.count(), 1)
self.assertEqual(BlogPost.objects.get().reviewer, None)
# Delete the Person, which should lead to deletion of the BlogPost, too
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_with_document_inheritance(self):
"""Ensure that a referenced document is also deleted upon deletion
of a child document.
"""
class Writer(self.Person):
pass
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = Writer(name='Test User')
author.save()
reviewer = Writer(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.reviewer = reviewer
post.save()
reviewer.delete()
self.assertEqual(BlogPost.objects.count(), 1)
self.assertEqual(BlogPost.objects.get().reviewer, None)
# Deleting the Writer should lead to deletion of the BlogPost
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_cascade_and_nullify_complex_field(self):
"""Ensure that a referenced document is also deleted upon deletion for
complex fields.
"""
class BlogPost(Document):
content = StringField()
authors = ListField(ReferenceField(
self.Person, reverse_delete_rule=CASCADE))
reviewers = ListField(ReferenceField(
self.Person, reverse_delete_rule=NULLIFY))
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
reviewer = self.Person(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.authors = [author]
post.reviewers = [reviewer]
post.save()
# Deleting the reviewer should have no effect on the BlogPost
reviewer.delete()
self.assertEqual(BlogPost.objects.count(), 1)
self.assertEqual(BlogPost.objects.get().reviewers, [])
# Delete the Person, which should lead to deletion of the BlogPost, too
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_cascade_triggers_pre_delete_signal(self):
""" ensure the pre_delete signal is triggered upon a cascading deletion
setup a blog post with content, an author and editor
delete the author which triggers deletion of blogpost via cascade
blog post's pre_delete signal alters an editor attribute
"""
class Editor(self.Person):
review_queue = IntField(default=0)
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
editor = ReferenceField(Editor)
@classmethod
def pre_delete(cls, sender, document, **kwargs):
# decrement the docs-to-review count
document.editor.update(dec__review_queue=1)
signals.pre_delete.connect(BlogPost.pre_delete, sender=BlogPost)
self.Person.drop_collection()
BlogPost.drop_collection()
Editor.drop_collection()
author = self.Person(name='Will S.').save()
editor = Editor(name='Max P.', review_queue=1).save()
BlogPost(content='wrote some books', author=author,
editor=editor).save()
# delete the author, the post is also deleted due to the CASCADE rule
author.delete()
# the pre-delete signal should have decremented the editor's queue
editor = Editor.objects(name='Max P.').get()
self.assertEqual(editor.review_queue, 0)
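# Sketch of the signal wiring used above: connecting a classmethod to
# signals.pre_delete with sender=BlogPost means the handler fires once
# per BlogPost deletion, including deletions triggered indirectly by the
# CASCADE rule on the author reference.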
def test_two_way_reverse_delete_rule(self):
"""Ensure that Bi-Directional relationships work with
reverse_delete_rule
"""
class Bar(Document):
content = StringField()
foo = ReferenceField('Foo')
class Foo(Document):
content = StringField()
bar = ReferenceField(Bar)
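# Because Foo does not exist yet when Bar declares its ReferenceField,
# the reverse_delete_rule keyword cannot be used directly; for circular
# references the rules are registered after both classes are defined: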
Bar.register_delete_rule(Foo, 'bar', NULLIFY)
Foo.register_delete_rule(Bar, 'foo', NULLIFY)
Bar.drop_collection()
Foo.drop_collection()
b = Bar(content="Hello")
b.save()
f = Foo(content="world", bar=b)
f.save()
b.foo = f
b.save()
f.delete()
self.assertEqual(Bar.objects.count(), 1) # No effect on the Bar
self.assertEqual(Bar.objects.get().foo, None)
def test_invalid_reverse_delete_rule_raise_errors(self):
def throw_invalid_document_error():
class Blog(Document):
content = StringField()
authors = MapField(ReferenceField(
self.Person, reverse_delete_rule=CASCADE))
reviewers = DictField(
field=ReferenceField(
self.Person,
reverse_delete_rule=NULLIFY))
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def throw_invalid_document_error_embedded():
class Parents(EmbeddedDocument):
father = ReferenceField('Person', reverse_delete_rule=DENY)
mother = ReferenceField('Person', reverse_delete_rule=DENY)
self.assertRaises(
InvalidDocumentError, throw_invalid_document_error_embedded)
def test_reverse_delete_rule_cascade_recurs(self):
"""Ensure that a chain of documents is also deleted upon cascaded
deletion.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
class Comment(Document):
text = StringField()
post = ReferenceField(BlogPost, reverse_delete_rule=CASCADE)
self.Person.drop_collection()
BlogPost.drop_collection()
Comment.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.save()
comment = Comment(text='Kudos.')
comment.post = post
comment.save()
# Delete the Person, which should lead to deletion of the BlogPost,
# and, recursively to the Comment, too
author.delete()
self.assertEqual(Comment.objects.count(), 0)
self.Person.drop_collection()
BlogPost.drop_collection()
Comment.drop_collection()
def test_reverse_delete_rule_deny(self):
"""Ensure that a document cannot be referenced if there are still
documents referring to it.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=DENY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.save()
# Deleting the Person should be denied
self.assertRaises(OperationError, author.delete) # Should raise denied error
self.assertEqual(BlogPost.objects.count(), 1) # No objects may have been deleted
self.assertEqual(self.Person.objects.count(), 1)
# Other users that don't have BlogPosts must be removable, as normal
author = self.Person(name='Another User')
author.save()
self.assertEqual(self.Person.objects.count(), 2)
author.delete()
self.assertEqual(self.Person.objects.count(), 1)
self.Person.drop_collection()
BlogPost.drop_collection()
def test_subclasses_and_unique_keys_works(self):
class A(Document):
pass
class B(A):
foo = BooleanField(unique=True)
A.drop_collection()
B.drop_collection()
A().save()
A().save()
B(foo=True).save()
self.assertEqual(A.objects.count(), 2)
self.assertEqual(B.objects.count(), 1)
A.drop_collection()
B.drop_collection()
def test_document_hash(self):
"""Test document in list, dict, set
"""
class User(Document):
pass
class BlogPost(Document):
pass
# Clear old data
User.drop_collection()
BlogPost.drop_collection()
u1 = User.objects.create()
u2 = User.objects.create()
u3 = User.objects.create()
u4 = User() # New object
b1 = BlogPost.objects.create()
b2 = BlogPost.objects.create()
# in List
all_user_list = list(User.objects.all())
self.assertTrue(u1 in all_user_list)
self.assertTrue(u2 in all_user_list)
self.assertTrue(u3 in all_user_list)
self.assertFalse(u4 in all_user_list) # New object
self.assertFalse(b1 in all_user_list) # Other object
self.assertFalse(b2 in all_user_list) # Other object
# in Dict
all_user_dic = {}
for u in User.objects.all():
all_user_dic[u] = "OK"
self.assertEqual(all_user_dic.get(u1, False), "OK")
self.assertEqual(all_user_dic.get(u2, False), "OK")
self.assertEqual(all_user_dic.get(u3, False), "OK")
self.assertEqual(all_user_dic.get(u4, False), False) # New object
self.assertEqual(all_user_dic.get(b1, False), False) # Other object
self.assertEqual(all_user_dic.get(b2, False), False) # Other object
# in Set
all_user_set = set(User.objects.all())
self.assertTrue(u1 in all_user_set)
def test_picklable(self):
pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleEmbedded()
pickled_doc = pickle.dumps(pickle_doc) # make sure pickling works even before the doc is saved
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
# Test pickling changed data
pickle_doc.lists.append("3")
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
resurrected.string = "Two"
resurrected.save()
pickle_doc = PickleTest.objects.first()
self.assertEqual(resurrected, pickle_doc)
self.assertEqual(pickle_doc.string, "Two")
self.assertEqual(pickle_doc.lists, ["1", "2", "3"])
def test_regular_document_pickle(self):
pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
pickled_doc = pickle.dumps(pickle_doc) # make sure pickling works even before the doc is saved
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
# Test that when a document's definition changes the new
# definition is used
fixtures.PickleTest = fixtures.NewDocumentPickleTest
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected.__class__,
fixtures.NewDocumentPickleTest)
self.assertEqual(resurrected._fields_ordered,
fixtures.NewDocumentPickleTest._fields_ordered)
self.assertNotEqual(resurrected._fields_ordered,
pickle_doc._fields_ordered)
# The local PickleTest is still a ref to the original
fixtures.PickleTest = PickleTest
def test_dynamic_document_pickle(self):
pickle_doc = PickleDynamicTest(
name="test", number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleDyanmicEmbedded(foo="Bar")
pickled_doc = pickle.dumps(pickle_doc) # make sure pickling works even before the doc is saved
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
self.assertEqual(resurrected._fields_ordered,
pickle_doc._fields_ordered)
self.assertEqual(resurrected._dynamic_fields.keys(),
pickle_doc._dynamic_fields.keys())
self.assertEqual(resurrected.embedded, pickle_doc.embedded)
self.assertEqual(resurrected.embedded._fields_ordered,
pickle_doc.embedded._fields_ordered)
self.assertEqual(resurrected.embedded._dynamic_fields.keys(),
pickle_doc.embedded._dynamic_fields.keys())
def test_picklable_on_signals(self):
pickle_doc = PickleSignalsTest(
number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleEmbedded()
pickle_doc.save()
pickle_doc.delete()
def test_throw_invalid_document_error(self):
# defining a field named 'validate' shadows Document.validate and must raise
def throw_invalid_document_error():
class Blog(Document):
validate = DictField()
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_mutating_documents(self):
class B(EmbeddedDocument):
field1 = StringField(default='field1')
class A(Document):
b = EmbeddedDocumentField(B, default=lambda: B())
A.drop_collection()
a = A()
a.save()
a.reload()
self.assertEqual(a.b.field1, 'field1')
class C(EmbeddedDocument):
c_field = StringField(default='cfield')
class B(EmbeddedDocument):
field1 = StringField(default='field1')
field2 = EmbeddedDocumentField(C, default=lambda: C())
class A(Document):
b = EmbeddedDocumentField(B, default=lambda: B())
a = A.objects()[0]
a.b.field2.c_field = 'new value'
a.save()
a.reload()
self.assertEqual(a.b.field2.c_field, 'new value')
def test_can_save_false_values(self):
"""Ensures you can save False values on save"""
class Doc(Document):
foo = StringField()
archived = BooleanField(default=False, required=True)
Doc.drop_collection()
d = Doc()
d.save()
d.archived = False
d.save()
self.assertEqual(Doc.objects(archived=False).count(), 1)
def test_can_save_false_values_dynamic(self):
"""Ensures you can save False values on dynamic docs"""
class Doc(DynamicDocument):
foo = StringField()
Doc.drop_collection()
d = Doc()
d.save()
d.archived = False
d.save()
self.assertEqual(Doc.objects(archived=False).count(), 1)
def test_do_not_save_unchanged_references(self):
"""Ensures cascading saves dont auto update"""
class Job(Document):
name = StringField()
class Person(Document):
name = StringField()
age = IntField()
job = ReferenceField(Job)
Job.drop_collection()
Person.drop_collection()
job = Job(name="Job 1")
# job should not have any changed fields after the save
job.save()
person = Person(name="name", age=10, job=job)
from pymongo.collection import Collection
orig_update = Collection.update
try:
def fake_update(*args, **kwargs):
self.fail("Unexpected update for %s" % args[0].name)
return orig_update(*args, **kwargs)
Collection.update = fake_update
person.save()
finally:
Collection.update = orig_update
def test_db_alias_tests(self):
""" DB Alias tests """
# mongoenginetest is the default connection alias from setUp()
# Register Aliases
register_connection('testdb-1', 'mongoenginetest2')
register_connection('testdb-2', 'mongoenginetest3')
register_connection('testdb-3', 'mongoenginetest4')
class User(Document):
name = StringField()
meta = {"db_alias": "testdb-1"}
class Book(Document):
name = StringField()
meta = {"db_alias": "testdb-2"}
# Drops
User.drop_collection()
Book.drop_collection()
# Create
bob = User.objects.create(name="Bob")
hp = Book.objects.create(name="Harry Potter")
# Selects
self.assertEqual(User.objects.first(), bob)
self.assertEqual(Book.objects.first(), hp)
# DeReference
class AuthorBooks(Document):
author = ReferenceField(User)
book = ReferenceField(Book)
meta = {"db_alias": "testdb-3"}
# Drops
AuthorBooks.drop_collection()
ab = AuthorBooks.objects.create(author=bob, book=hp)
# select
self.assertEqual(AuthorBooks.objects.first(), ab)
self.assertEqual(AuthorBooks.objects.first().book, hp)
self.assertEqual(AuthorBooks.objects.first().author, bob)
self.assertEqual(AuthorBooks.objects.filter(author=bob).first(), ab)
self.assertEqual(AuthorBooks.objects.filter(book=hp).first(), ab)
# DB Alias
self.assertEqual(User._get_db(), get_db("testdb-1"))
self.assertEqual(Book._get_db(), get_db("testdb-2"))
self.assertEqual(AuthorBooks._get_db(), get_db("testdb-3"))
# Collections
self.assertEqual(
User._get_collection(),
get_db("testdb-1")[User._get_collection_name()])
self.assertEqual(
Book._get_collection(),
get_db("testdb-2")[Book._get_collection_name()])
self.assertEqual(
AuthorBooks._get_collection(),
get_db("testdb-3")[AuthorBooks._get_collection_name()])
def test_db_alias_overrides(self):
"""db_alias can be overriden
"""
# Register a connection with db_alias testdb-2
register_connection('testdb-2', 'mongoenginetest2')
class A(Document):
"""Uses default db_alias
"""
name = StringField()
meta = {"allow_inheritance": True}
class B(A):
"""Uses testdb-2 db_alias
"""
meta = {"db_alias": "testdb-2"}
A.objects.all()
self.assertEqual('testdb-2', B._meta.get('db_alias'))
self.assertEqual('mongoenginetest',
A._get_collection().database.name)
self.assertEqual('mongoenginetest2',
B._get_collection().database.name)
def test_db_alias_propagates(self):
"""db_alias propagates?
"""
register_connection('testdb-1', 'mongoenginetest2')
class A(Document):
name = StringField()
meta = {"db_alias": "testdb-1", "allow_inheritance": True}
class B(A):
pass
self.assertEqual('testdb-1', B._meta.get('db_alias'))
def test_db_ref_usage(self):
""" DB Ref usage in dict_fields"""
class User(Document):
name = StringField()
class Book(Document):
name = StringField()
author = ReferenceField(User)
extra = DictField()
meta = {
'ordering': ['+name']
}
def __unicode__(self):
return self.name
def __str__(self):
return self.name
# Drops
User.drop_collection()
Book.drop_collection()
# Authors
bob = User.objects.create(name="Bob")
jon = User.objects.create(name="Jon")
# Redactors
karl = User.objects.create(name="Karl")
susan = User.objects.create(name="Susan")
peter = User.objects.create(name="Peter")
# Bob
Book.objects.create(name="1", author=bob, extra={
"a": bob.to_dbref(), "b": [karl.to_dbref(), susan.to_dbref()]})
Book.objects.create(name="2", author=bob, extra={
"a": bob.to_dbref(), "b": karl.to_dbref()})
Book.objects.create(name="3", author=bob, extra={
"a": bob.to_dbref(), "c": [jon.to_dbref(), peter.to_dbref()]})
Book.objects.create(name="4", author=bob)
# Jon
Book.objects.create(name="5", author=jon)
Book.objects.create(name="6", author=peter)
Book.objects.create(name="7", author=jon)
Book.objects.create(name="8", author=jon)
Book.objects.create(name="9", author=jon,
extra={"a": peter.to_dbref()})
# Checks
self.assertEqual(",".join([str(b) for b in Book.objects.all()]),
"1,2,3,4,5,6,7,8,9")
# bob related books
self.assertEqual(",".join([str(b) for b in Book.objects.filter(
Q(extra__a=bob) |
Q(author=bob) |
Q(extra__b=bob))]),
"1,2,3,4")
# Susan & Karl related books
self.assertEqual(",".join([str(b) for b in Book.objects.filter(
Q(extra__a__all=[karl, susan]) |
Q(author__all=[karl, susan]) |
Q(extra__b__all=[
karl.to_dbref(), susan.to_dbref()]))
]), "1")
# $Where
self.assertEqual(u",".join([str(b) for b in Book.objects.filter(
__raw__={
"$where": """
function(){
return this.name == '1' ||
this.name == '2';}"""
})]),
"1,2")
def test_switch_db_instance(self):
register_connection('testdb-1', 'mongoenginetest2')
class Group(Document):
name = StringField()
Group.drop_collection()
with switch_db(Group, 'testdb-1') as Group:
Group.drop_collection()
Group(name="hello - default").save()
self.assertEqual(1, Group.objects.count())
group = Group.objects.first()
group.switch_db('testdb-1')
group.name = "hello - testdb!"
group.save()
with switch_db(Group, 'testdb-1') as Group:
group = Group.objects.first()
self.assertEqual("hello - testdb!", group.name)
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
# Slightly contrived now - perform an update
# Only works as they have the same object_id
group.switch_db('testdb-1')
group.update(set__name="hello - update")
with switch_db(Group, 'testdb-1') as Group:
group = Group.objects.first()
self.assertEqual("hello - update", group.name)
Group.drop_collection()
self.assertEqual(0, Group.objects.count())
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
# Totally contrived now - perform a delete
# Only works as they have the same object_id
group.switch_db('testdb-1')
group.delete()
with switch_db(Group, 'testdb-1') as Group:
self.assertEqual(0, Group.objects.count())
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
def test_load_undefined_fields(self):
class User(Document):
name = StringField()
User.drop_collection()
User._get_collection().save({
'name': 'John',
'foo': 'Bar',
'data': [1, 2, 3]
})
self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_with_strict_false(self):
class User(Document):
name = StringField()
meta = {'strict': False}
User.drop_collection()
User._get_collection().save({
'name': 'John',
'foo': 'Bar',
'data': [1, 2, 3]
})
user = User.objects.first()
self.assertEqual(user.name, 'John')
self.assertFalse(hasattr(user, 'foo'))
self.assertEqual(user._data['foo'], 'Bar')
self.assertFalse(hasattr(user, 'data'))
self.assertEqual(user._data['data'], [1, 2, 3])
def test_load_undefined_fields_on_embedded_document(self):
class Thing(EmbeddedDocument):
name = StringField()
class User(Document):
name = StringField()
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
User._get_collection().save({
'name': 'John',
'thing': {
'name': 'My thing',
'foo': 'Bar',
'data': [1, 2, 3]
}
})
self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_on_embedded_document_with_strict_false_on_doc(self):
class Thing(EmbeddedDocument):
name = StringField()
class User(Document):
name = StringField()
thing = EmbeddedDocumentField(Thing)
meta = {'strict': False}
User.drop_collection()
User._get_collection().save({
'name': 'John',
'thing': {
'name': 'My thing',
'foo': 'Bar',
'data': [1, 2, 3]
}
})
self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_on_embedded_document_with_strict_false(self):
class Thing(EmbeddedDocument):
name = StringField()
meta = {'strict': False}
class User(Document):
name = StringField()
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
User._get_collection().save({
'name': 'John',
'thing': {
'name': 'My thing',
'foo': 'Bar',
'data': [1, 2, 3]
}
})
user = User.objects.first()
self.assertEqual(user.name, 'John')
self.assertEqual(user.thing.name, 'My thing')
self.assertFalse(hasattr(user.thing, 'foo'))
self.assertEqual(user.thing._data['foo'], 'Bar')
self.assertFalse(hasattr(user.thing, 'data'))
self.assertEqual(user.thing._data['data'], [1, 2, 3])
def test_spaces_in_keys(self):
class Embedded(DynamicEmbeddedDocument):
pass
class Doc(DynamicDocument):
pass
Doc.drop_collection()
doc = Doc()
setattr(doc, 'hello world', 1)
doc.save()
one = Doc.objects.filter(**{'hello world': 1}).count()
self.assertEqual(1, one)
def test_shard_key(self):
class LogEntry(Document):
machine = StringField()
log = StringField()
meta = {
'shard_key': ('machine',)
}
LogEntry.drop_collection()
log = LogEntry()
log.machine = "Localhost"
log.save()
self.assertTrue(log.id is not None)
log.log = "Saving"
log.save()
def change_shard_key():
log.machine = "127.0.0.1"
self.assertRaises(OperationError, change_shard_key)
def test_shard_key_in_embedded_document(self):
class Foo(EmbeddedDocument):
foo = StringField()
class Bar(Document):
meta = {
'shard_key': ('foo.foo',)
}
foo = EmbeddedDocumentField(Foo)
bar = StringField()
foo_doc = Foo(foo='hello')
bar_doc = Bar(foo=foo_doc, bar='world')
bar_doc.save()
self.assertTrue(bar_doc.id is not None)
bar_doc.bar = 'baz'
bar_doc.save()
def change_shard_key():
bar_doc.foo.foo = 'something'
bar_doc.save()
self.assertRaises(OperationError, change_shard_key)
def test_shard_key_primary(self):
class LogEntry(Document):
machine = StringField(primary_key=True)
log = StringField()
meta = {
'shard_key': ('machine',)
}
LogEntry.drop_collection()
log = LogEntry()
log.machine = "Localhost"
log.save()
self.assertTrue(log.id is not None)
log.log = "Saving"
log.save()
def change_shard_key():
log.machine = "127.0.0.1"
self.assertRaises(OperationError, change_shard_key)
def test_kwargs_simple(self):
class Embedded(EmbeddedDocument):
name = StringField()
class Doc(Document):
doc_name = StringField()
doc = EmbeddedDocumentField(Embedded)
def __eq__(self, other):
return (self.doc_name == other.doc_name and
self.doc == other.doc)
classic_doc = Doc(doc_name="my doc", doc=Embedded(name="embedded doc"))
dict_doc = Doc(**{"doc_name": "my doc",
"doc": {"name": "embedded doc"}})
self.assertEqual(classic_doc, dict_doc)
self.assertEqual(classic_doc._data, dict_doc._data)
def test_kwargs_complex(self):
class Embedded(EmbeddedDocument):
name = StringField()
class Doc(Document):
doc_name = StringField()
docs = ListField(EmbeddedDocumentField(Embedded))
def __eq__(self, other):
return (self.doc_name == other.doc_name and
self.docs == other.docs)
classic_doc = Doc(doc_name="my doc", docs=[
Embedded(name="embedded doc1"),
Embedded(name="embedded doc2")])
dict_doc = Doc(**{"doc_name": "my doc",
"docs": [{"name": "embedded doc1"},
{"name": "embedded doc2"}]})
self.assertEqual(classic_doc, dict_doc)
self.assertEqual(classic_doc._data, dict_doc._data)
def test_positional_creation(self):
"""Ensure that document may be created using positional arguments.
"""
person = self.Person("Test User", 42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_mixed_creation(self):
"""Ensure that document may be created using mixed arguments.
"""
person = self.Person("Test User", age=42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_mixed_creation_dynamic(self):
"""Ensure that document may be created using mixed arguments.
"""
class Person(DynamicDocument):
name = StringField()
person = Person("Test User", age=42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_bad_mixed_creation(self):
"""Ensure that document gives correct error when duplicating arguments
"""
def construct_bad_instance():
return self.Person("Test User", 42, name="Bad User")
self.assertRaises(TypeError, construct_bad_instance)
def test_data_contains_id_field(self):
"""Ensure that asking for _data returns 'id'
"""
class Person(Document):
name = StringField()
Person.drop_collection()
Person(name="Harry Potter").save()
person = Person.objects.first()
self.assertTrue('id' in person._data.keys())
self.assertEqual(person._data.get('id'), person.id)
def test_complex_nesting_document_and_embedded_document(self):
class Macro(EmbeddedDocument):
value = DynamicField(default="UNDEFINED")
class Parameter(EmbeddedDocument):
macros = MapField(EmbeddedDocumentField(Macro))
def expand(self):
self.macros["test"] = Macro()
class Node(Document):
parameters = MapField(EmbeddedDocumentField(Parameter))
def expand(self):
self.flattened_parameter = {}
for parameter_name, parameter in self.parameters.iteritems():
parameter.expand()
class NodesSystem(Document):
name = StringField(required=True)
nodes = MapField(ReferenceField(Node, dbref=False))
def save(self, *args, **kwargs):
for node_name, node in self.nodes.iteritems():
node.expand()
node.save(*args, **kwargs)
super(NodesSystem, self).save(*args, **kwargs)
NodesSystem.drop_collection()
Node.drop_collection()
system = NodesSystem(name="system")
system.nodes["node"] = Node()
system.save()
system.nodes["node"].parameters["param"] = Parameter()
system.save()
system = NodesSystem.objects.first()
self.assertEqual(
"UNDEFINED",
system.nodes["node"].parameters["param"].macros["test"].value)
def test_embedded_document_equality(self):
class Test(Document):
field = StringField(required=True)
class Embedded(EmbeddedDocument):
ref = ReferenceField(Test)
Test.drop_collection()
test = Test(field='123').save() # has id
e = Embedded(ref=test)
f1 = Embedded._from_son(e.to_mongo())
f2 = Embedded._from_son(e.to_mongo())
self.assertEqual(f1, f2)
f1.ref # Dereferences lazily
self.assertEqual(f1, f2)
def test_dbref_equality(self):
class Test2(Document):
name = StringField()
class Test3(Document):
name = StringField()
class Test(Document):
name = StringField()
test2 = ReferenceField('Test2')
test3 = ReferenceField('Test3')
Test.drop_collection()
Test2.drop_collection()
Test3.drop_collection()
t2 = Test2(name='a')
t2.save()
t3 = Test3(name='x')
t3.id = t2.id
t3.save()
t = Test(name='b', test2=t2, test3=t3)
f = Test._from_son(t.to_mongo())
dbref2 = f._data['test2']
obj2 = f.test2
self.assertTrue(isinstance(dbref2, DBRef))
self.assertTrue(isinstance(obj2, Test2))
self.assertTrue(obj2.id == dbref2.id)
self.assertTrue(obj2 == dbref2)
self.assertTrue(dbref2 == obj2)
dbref3 = f._data['test3']
obj3 = f.test3
self.assertTrue(isinstance(dbref3, DBRef))
self.assertTrue(isinstance(obj3, Test3))
self.assertTrue(obj3.id == dbref3.id)
self.assertTrue(obj3 == dbref3)
self.assertTrue(dbref3 == obj3)
self.assertTrue(obj2.id == obj3.id)
self.assertTrue(dbref2.id == dbref3.id)
self.assertFalse(dbref2 == dbref3)
self.assertFalse(dbref3 == dbref2)
self.assertTrue(dbref2 != dbref3)
self.assertTrue(dbref3 != dbref2)
self.assertFalse(obj2 == dbref3)
self.assertFalse(dbref3 == obj2)
self.assertTrue(obj2 != dbref3)
self.assertTrue(dbref3 != obj2)
self.assertFalse(obj3 == dbref2)
self.assertFalse(dbref2 == obj3)
self.assertTrue(obj3 != dbref2)
self.assertTrue(dbref2 != obj3)
def test_default_values(self):
class Person(Document):
created_on = DateTimeField(default=lambda: datetime.utcnow())
name = StringField()
p = Person(name='alon')
p.save()
orig_created_on = Person.objects().only('created_on')[0].created_on
p2 = Person.objects().only('name')[0]
p2.name = 'alon2'
p2.save()
p3 = Person.objects().only('created_on')[0]
self.assertEquals(orig_created_on, p3.created_on)
class Person(Document):
created_on = DateTimeField(default=lambda: datetime.utcnow())
name = StringField()
height = IntField(default=189)
p4 = Person.objects()[0]
p4.save()
self.assertEquals(p4.height, 189)
self.assertEquals(Person.objects(height=189).count(), 1)
def test_from_son(self):
# 771
class MyPerson(self.Person):
meta = dict(shard_key=["id"])
p = MyPerson.from_json('{"name": "name", "age": 27}', created=True)
self.assertEquals(p.id, None)
p.id = "12345" # in case it is not working: "OperationError: Shard Keys are immutable..." will be raised here
p = MyPerson._from_son({"name": "name", "age": 27}, created=True)
self.assertEquals(p.id, None)
p.id = "12345" # in case it is not working: "OperationError: Shard Keys are immutable..." will be raised here
def test_null_field(self):
# 734
class User(Document):
name = StringField()
height = IntField(default=184, null=True)
str_fld = StringField(null=True)
int_fld = IntField(null=True)
flt_fld = FloatField(null=True)
dt_fld = DateTimeField(null=True)
cdt_fld = ComplexDateTimeField(null=True)
User.objects.delete()
u = User(name='user')
u.save()
u_from_db = User.objects.get(name='user')
u_from_db.height = None
u_from_db.save()
self.assertEquals(u_from_db.height, None)
# 864
self.assertEqual(u_from_db.str_fld, None)
self.assertEqual(u_from_db.int_fld, None)
self.assertEqual(u_from_db.flt_fld, None)
self.assertEqual(u_from_db.dt_fld, None)
self.assertEqual(u_from_db.cdt_fld, None)
# 735
User.objects.delete()
u = User(name='user')
u.save()
User.objects(name='user').update_one(set__height=None, upsert=True)
u_from_db = User.objects.get(name='user')
self.assertEquals(u_from_db.height, None)
def test_not_saved_eq(self):
"""Ensure we can compare documents not saved.
"""
class Person(Document):
pass
p = Person()
p1 = Person()
self.assertNotEqual(p, p1)
self.assertEqual(p, p)
def test_list_iter(self):
# 914
class B(EmbeddedDocument):
v = StringField()
class A(Document):
l = ListField(EmbeddedDocumentField(B))
A.objects.delete()
A(l=[B(v='1'), B(v='2'), B(v='3')]).save()
a = A.objects.get()
self.assertEqual(a.l._instance, a)
for idx, b in enumerate(a.l):
self.assertEqual(b._instance, a)
self.assertEqual(idx, 2)
if __name__ == '__main__':
unittest.main()
|
larsbutler/mongoengine
|
tests/document/instance.py
|
Python
|
mit
| 99,915
|
[
"exciting"
] |
c4690ac4f9553b20a2f0355a1f2a32a13b6bfee256df2a70903b33de5597da79
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Feb 2, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Feb 2, 2012"
import unittest
import os
import json
from monty.json import MontyDecoder
from pymatgen import Composition
from pymatgen.apps.battery.conversion_battery import ConversionElectrode, \
ConversionVoltagePair
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class ConversionElectrodeTest(unittest.TestCase):
def setUp(self):
pass
def test_init(self):
formulas = ['LiCoO2', "FeF3"]
expected_properties = {}
expected_properties['LiCoO2'] = {'average_voltage': 2.26940307125,
'capacity_grav': 903.19752911225669,
'capacity_vol': 2903.35804724,
'specific_energy': 2049.7192465127678,
'energy_density': 6588.8896693479574}
expected_properties['FeF3'] = {'average_voltage': 3.06179925889,
'capacity_grav': 601.54508701578118,
'capacity_vol': 2132.2069115142394,
'specific_energy': 1841.8103016131706,
'energy_density': 6528.38954147}
for f in formulas:
with open(os.path.join(test_dir, f + "_batt.json"), 'r') as fid:
entries = json.load(fid, cls=MontyDecoder)
#entries = computed_entries_from_json(fid.read())
# with open(os.path.join(test_dir, f + "_batt.json"), 'w') as fid:
#json.dump(entries, fid, cls=MontyEncoder)
c = ConversionElectrode.from_composition_and_entries(
Composition(f), entries)
self.assertEqual(len(c.get_sub_electrodes(True)), c.num_steps)
self.assertEqual(len(c.get_sub_electrodes(False)),
sum(range(1, c.num_steps + 1)))
self.assertIsNotNone(str(c))
p = expected_properties[f]
for k, v in p.items():
self.assertAlmostEqual(getattr(c, "get_" + k).__call__(), v, 2)
self.assertIsNotNone(c.get_summary_dict(True))
#Test pair to dict
pair = c.voltage_pairs[0]
d = pair.as_dict()
pair2 = ConversionVoltagePair.from_dict(d)
for prop in ['voltage', 'mass_charge', 'mass_discharge']:
                self.assertAlmostEqual(getattr(pair, prop), getattr(pair2, prop), 2)
            #Test electrode round-trip through as_dict / from_dict
d = c.as_dict()
electrode = ConversionElectrode.from_dict(d)
for k, v in p.items():
self.assertAlmostEqual(getattr(electrode,
"get_" + k).__call__(), v, 2)
if __name__ == "__main__":
unittest.main()
|
matk86/pymatgen
|
pymatgen/apps/battery/tests/test_conversion_battery.py
|
Python
|
mit
| 3,213
|
[
"pymatgen"
] |
861bd31079f433c4ec89c7bb244ac7297309683da4c131ab520c635b5347fe17
|
from __future__ import division
import numpy as np
import math
import copy
class KpointsError(Exception):
def __init__(self,msg):
self.msg = msg
def __str__(self):
return self.msg
class Kpoints:
"""
The Kpoints class contains:
self.header: (str) the first line from the KPOINTS file being read
        self.num_points: (list of int) one-element list holding the value from the second line (0 => automatic)
self.subdivisions: (list of int) the number of kpoints along each of the vectors in reciprocal space
or the kpoint total "length" if the mode is Automatic
self.automode: (str) Gamma/Monkhorst-Pack/Automatic
self.shift: (list of float) the shifts that are added to the automatically generated points
"""
def __init__(self,filename):
""" Constructs a Kpoints object from a KPOINTS file """
self.read(filename)
def read(self,filename):
""" Reads a KPOINTS file """
try:
file = open(filename)
except IOError:
            raise KpointsError("IOError: could not open file '" + filename + "'")
# read header
self.header = file.readline().strip()
# read second line
line = file.readline().strip()
if len(line)<=0:
raise KpointsError("Could not read number of points: '" + line + "'")
words = line.split()
if words[0]!='0':
raise KpointsError("Non-automatic kpoint generation not implemented")
try:
self.num_points = [int(words[0])]
except ValueError:
raise KpointsError("Illegal line for the number of points")
# read third line
self.automode = file.readline().strip()
if self.automode[0].lower() not in ['m','g', 'a']:
raise KpointsError("Illegal mode: '" + self.automode + "'")
# read subdivisions line
line = file.readline()
if self.automode[0].lower() == 'a':
nEntries = 1
else:
nEntries = 3
try:
self.subdivisions = [int(word) for word in line.split()[0:nEntries]]
except ValueError:
raise KpointsError("The subdivisions line could not be read: '" + line + "'")
# read shift line
self.shift = [0.0, 0.0, 0.0]
line = file.readline()
if not line == '':
if self.automode[0].lower() == 'a':
raise KpointsError("Fully automatic k-point mesh generation doesn't support shifts! \
\n Please remove shift line: '" + line + "'")
if len(line.split()) != 0:
if len(line.split()) < 3:
raise KpointsError("The shift line could not be understood: '" + line + "'")
try:
self.shift = [float(word) for word in line.split()[0:3]]
except ValueError:
raise KpointsError("The shift line could not be read: '" + line + "'")
file.close()
def super_kpoints(self, prim, super):
""" Assuming 'self' is the kpoints associated with a PRIM, it uses a scaling method to calculate
            the kpoint-mesh for a supercell, such that it has an equal or greater kpoint
            density than the prim. If the kpoints associated with a PRIM are mode 'a' then this
process is bypassed: VASP will correctly scale the kpoints by the supercell reciprocal
lattice at runtime.
Returns:
super_kpoints: a Kpoints object for the supercell
Args:
prim: Poscar object for the prim OR None (if self.automode = 'a')
super: a Poscar object for the supercell (not used if self.automode = 'a')
"""
super_kpoints = copy.deepcopy(self)
if self.automode[0].lower() == 'a':
# Do nothing if we're using a fully-automatic k-point mesh - VASP will deal with this for us
pass
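        # NOTE: the following branch is always taken ('elif True'); the 'else'
        # branch below is kept for reference but is currently unreachable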
elif True:
if prim == None:
raise KpointsError("No POSCAR was provided for the PRIM, so the PRIM KPOINTS could not be scaled!")
super_kpoints.subdivisions = [1, 1, 1]
# calculate prim volumetric kpoint densities
prim_density = self.density(prim)
# calculate recip lattice vector lengths
super_recip_vec_lengths = [np.linalg.norm(super.reciprocal_lattice(x)) for x in range(3)]
# while supercell kpoint density is less than prim kpoint density
while super_kpoints.density(super) < prim_density:
# increase the number of subdivisions along the least dense super recip vector
linear_density = [super_kpoints.subdivisions[x]/super_recip_vec_lengths[x] for x in range(3)]
min_index = linear_density.index(min(linear_density))
super_kpoints.subdivisions[min_index] += 1
# set all subdivisions to be at similar linear density
scale = super_kpoints.subdivisions[min_index] / super_recip_vec_lengths[min_index]
for i in range(3):
super_kpoints.subdivisions[i] = int(math.ceil(scale * super_recip_vec_lengths[i]))
# end while
else:
# calculate recip lattice vector lengths
super_recip_vec_lengths = [np.linalg.norm(super.reciprocal_lattice(x)) for x in range(3)]
prim_recip_vec_lengths = [np.linalg.norm(prim.reciprocal_lattice(x)) for x in range(3)]
# set estimated super_kpoints subdivisions, using prim subdivisions/recip length along shortest recip vector
short_ind = np.argmin(np.array(prim_recip_vec_lengths))
shortest = prim_recip_vec_lengths[short_ind]
            effective_subdivisions = self.subdivisions[short_ind]
scale = effective_subdivisions / shortest
for i in range(3):
super_kpoints.subdivisions[i] = int(math.ceil(scale * super_recip_vec_lengths[i]))
# calculate kpoint densities
prim_density = self.density(prim)
super_density = super_kpoints.density(super)
# increase effective prim subdivisions until super_density >= prim_density
while(super_density < prim_density):
effective_subdivisions += 1
scale = effective_subdivisions / shortest
for i in range(3):
super_kpoints.subdivisions[i] = int(math.ceil(scale * super_recip_vec_lengths[i]))
super_density = super_kpoints.density(super)
return super_kpoints
def density(self, poscar):
""" Return the kpoint density with respect to a Poscar.
Args:
poscar: a Poscar object
"""
return (self.subdivisions[0] * self.subdivisions[1] * self.subdivisions[2]) / poscar.reciprocal_volume()
def write(self,filename):
""" Write a KPOINTS file """
try:
file = open(filename,'w')
except IOError:
raise KpointsError("Write failed")
file.write(self.header+'\n')
file.write(str(self.num_points).translate(None,'[],')+'\n')
file.write(self.automode+'\n')
file.write(str(self.subdivisions).translate(None,'[],')+'\n')
if self.automode[0].lower() != 'a':
file.write(str(self.shift).translate(None,'[],')+'\n')
file.close()
return
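# Minimal usage sketch (illustrative; 'KPOINTS', 'prim_poscar' and 'super_poscar'
# are assumed inputs, not part of this module):
#
#   kpts = Kpoints('KPOINTS')                        # parse an existing mesh
#   scaled = kpts.super_kpoints(prim_poscar, super_poscar)
#   scaled.write('KPOINTS.super')                    # write the scaled mesh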
|
jbechtel/CASMcode
|
python/vasp/vasp/io/kpoints.py
|
Python
|
lgpl-2.1
| 7,809
|
[
"VASP"
] |
e766a35945db913cde6287d3dd0d12e9f5eae26866e27186b2ab0db6545c29f3
|
########################################################################
# $HeadURL$
# File : CSCLI.py
# Author : Adria Casajus
########################################################################
__RCSID__ = "$Id$"
import cmd
import sys
import signal
import types
from DIRAC.Core.Utilities.ColorCLI import colorize
from DIRAC.ConfigurationSystem.private.Modificator import Modificator
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import gLogger
def _showTraceback():
import traceback
  excepType, excepValue = sys.exc_info()[:2]
  print "________________________\n"
  print "Exception", excepType, ":", excepValue
traceback.print_tb( sys.exc_info()[2] )
print "________________________\n"
def _printComment( comment ):
commentList = comment.split( "\n" )
for commentLine in commentList[ :-1 ]:
print "# %s" % commentLine.strip()
def _appendExtensionIfMissing( filename ):
dotPosition = filename.rfind( "." )
if dotPosition > -1:
filename = filename[ :dotPosition ]
return "%s.cfg" % filename
class CSCLI( cmd.Cmd ):
def __init__( self ):
cmd.Cmd.__init__( self )
self.connected = False
self.masterURL = "unset"
self.writeEnabled = False
self.modifiedData = False
self.rpcClient = None
self.do_connect()
if self.connected:
self.modificator = Modificator ( self.rpcClient )
else:
self.modificator = Modificator()
self.identSpace = 20
self.backupFilename = "dataChanges"
self._initSignals()
#User friendly hack
self.do_exit = self.do_quit
def start( self ):
if self.connected:
self.modificator.loadFromRemote()
retVal = self.modificator.loadCredentials()
if not retVal[ 'OK' ]:
print "There was an error gathering your credentials"
print retVal[ 'Message' ]
self._setStatus( False )
try:
self.cmdloop()
except KeyboardInterrupt:
gLogger.warn( "Received a keyboard interrupt." )
self.do_quit( "" )
def _initSignals( self ):
"""
Registers signal handlers
"""
for sigNum in ( signal.SIGINT, signal.SIGQUIT, signal.SIGKILL, signal.SIGTERM ):
try:
signal.signal( sigNum, self.do_quit )
except:
pass
def _setConnected( self, connected, writeEnabled ):
self.connected = connected
self.modifiedData = False
self.writeEnabled = writeEnabled
if connected:
if writeEnabled:
self.prompt = "(%s)-%s> " % ( self.masterURL, colorize( "Connected", "green" ) )
else:
self.prompt = "(%s)-%s> " % ( self.masterURL, colorize( "Connected (RO)", "yellow" ) )
else:
self.prompt = "(%s)-%s> " % ( self.masterURL, colorize( "Disconnected", "red" ) )
def _printPair( self, key, value, separator = ":" ):
valueList = value.split( "\n" )
print "%s%s%s %s" % ( key, " " * ( self.identSpace - len( key ) ), separator, valueList[0].strip() )
for valueLine in valueList[ 1:-1 ]:
print "%s %s" % ( " " * self.identSpace, valueLine.strip() )
def do_quit( self, dummy ):
"""
Exits the application without sending changes to server
Usage: quit
"""
if self.modifiedData:
print "Changes are about to be written to file for later use."
self.do_writeToFile( self.backupFilename )
print "Changes written to %s.cfg" % self.backupFilename
sys.exit( 0 )
def do_EOF( self, args ):
"""
Accepts ctrl^D to quit CLI
"""
print ""
self.do_quit( args )
def do_help( self, args ):
"""
Shows help information
Usage: help <command>
If no command is specified all commands are shown
"""
if len( args ) == 0:
print "\nAvailable commands:\n"
attrList = dir( self )
attrList.sort()
for attribute in attrList:
if attribute.find( "do_" ) == 0:
self._printPair( attribute[ 3: ], getattr( self, attribute ).__doc__[ 1: ] )
print ""
else:
command = args.split()[0].strip()
try:
obj = getattr( self, "do_%s" % command )
except:
print "There's no such %s command" % command
return
self._printPair( command, obj.__doc__[1:] )
# def retrieveData( self ):
# if not self.connected:
# return False
# response = self.rpcClient.dumpCompressed()
# if response[ 'Status' ] == 'OK':
# self.cDataHolder.loadFromCompressedSource( response[ 'Value' ] )
# gLogger.info( "Data retrieved from server." )
# return True
# else:
# gLogger.error( "Can't retrieve updated data from server." )
# return False
def _setStatus( self, connected = True ):
if not connected:
self.masterURL = "unset"
self._setConnected( False, False )
else:
retVal = self.rpcClient.writeEnabled()
if retVal[ 'OK' ]:
if retVal[ 'Value' ] == True:
self._setConnected( True, True )
else:
self._setConnected( True, False )
else:
print "Server returned an error: %s" % retVal[ 'Message' ]
self._setConnected( True, False )
def _tryConnection( self ):
print "Trying connection to %s" % self.masterURL
try:
self.rpcClient = RPCClient( self.masterURL )
self._setStatus()
except Exception, x:
gLogger.error( "Couldn't connect to %s (%s)" % ( self.masterURL, str( x ) ) )
self._setStatus( False )
def do_connect( self, args = '' ):
"""
Connects to configuration master server (in specified url if provided).
Usage: connect <url>
"""
if not args or type( args ) not in types.StringTypes:
self.masterURL = gConfigurationData.getMasterServer()
if self.masterURL != "unknown" and self.masterURL:
self._tryConnection()
else:
self._setStatus( False )
else:
splitted = args.split()
if len( splitted ) == 0:
print "Must specify witch url to connect"
self._setStatus( False )
else:
self.masterURL = splitted[0].strip()
self._tryConnection()
def do_sections( self, args ):
"""
Shows all sections with their comments.
If no section is specified, root is taken.
Usage: sections <section>
"""
try:
argList = args.split()
if argList:
baseSection = argList[0].strip()
else:
baseSection = "/"
if not self.modificator.existsSection( baseSection ):
print "Section %s does not exist" % baseSection
return
sectionList = self.modificator.getSections( baseSection )
if not sectionList:
print "Section %s is empty" % baseSection
return
for section in sectionList:
section = "%s/%s" % ( baseSection, section )
self._printPair( section, self.modificator.getComment( section ) , "#" )
except:
_showTraceback()
def do_options( self, args ):
"""
Shows all options and values of a specified section
Usage: options <section>
"""
try:
argList = args.split()
if argList:
section = argList[0].strip()
else:
print "Which section?"
return
if not self.modificator.existsSection( section ):
print "Section %s does not exist" % section
return
optionsList = self.modificator.getOptions( section )
if not optionsList:
print "Section %s has no options" % section
return
for option in optionsList:
_printComment( self.modificator.getComment( "%s/%s" % ( section, option ) ) )
self._printPair( option, self.modificator.getValue( "%s/%s" % ( section, option ) ), "=" )
except:
_showTraceback()
def do_get( self, args ):
"""
Shows value and comment for specified option in section
Usage: get <path to option>
"""
try:
argList = args.split()
if argList:
optionPath = argList[0].strip()
else:
print "Which option?"
return
if self.modificator.existsOption( optionPath ):
option = optionPath.split( "/" )[-1]
_printComment( self.modificator.getComment( optionPath ) )
self._printPair( option, self.modificator.getValue( optionPath ), "=" )
else:
print "Option %s does not exist" % optionPath
except:
_showTraceback()
def do_writeToServer( self, dummy ):
"""
Sends changes to server.
Usage: writeToServer
"""
if not self.connected:
print "You are not connected!"
return
try:
if not self.writeEnabled:
print "This server can't receive data modifications"
return
if not self.modifiedData:
while True:
choice = raw_input( "Data has not been modified, do you still want to upload changes? yes/no [no]: " )
choice = choice.lower()
if choice in ( "yes", "y" ):
break
else:
print "Commit aborted"
return
choice = raw_input( "Do you really want to send changes to server? yes/no [no]: " )
choice = choice.lower()
if choice in ( "yes", "y" ):
print "Uploading changes to %s (It may take some seconds)..." % self.masterURL
response = self.modificator.commit()
if response[ 'OK' ]:
self.modifiedData = False
print "Data sent to server."
self.modificator.loadFromRemote()
else:
print "Error sending data, server said: %s" % response['Message']
return
else:
print "Commit aborted"
except Exception, x:
_showTraceback()
print "Could not upload changes. ", str( x )
def do_set( self, args ):
"""
Sets option's value
Usage: set <optionPath> <value>...
From second argument until the last one is considered option's value
NOTE: If specified section does not exist it is created.
"""
try:
argsList = args.split()
if len( argsList ) < 2:
print "Must specify option and value to use"
return
optionPath = argsList[0].strip()
value = " ".join( argsList[1:] ).strip()
self.modificator.setOptionValue( optionPath, value )
self.modifiedData = True
except Exception, x:
print "Cannot insert value: ", str( x )
def do_removeOption( self, args ):
"""
Removes an option.
Usage: removeOption <option>
    Note that removing an option may leave its section empty.
"""
try:
argsList = args.split()
if len( argsList ) < 1:
print "Must specify option to delete"
return
optionPath = argsList[0].strip()
choice = raw_input( "Are you sure you want to delete %s? yes/no [no]: " % optionPath )
choice = choice.lower()
if choice in ( "yes", "y", "true" ):
if self.modificator.removeOption( optionPath ):
self.modifiedData = True
else:
print "Can't be deleted"
else:
print "Aborting removal."
except Exception, x:
print "Error removing option, %s" % str( x )
def do_removeSection( self, args ):
"""
Removes a section.
Usage: removeSection <section>
"""
try:
argsList = args.split()
if len( argsList ) < 1:
print "Must specify section to delete"
return
section = argsList[0].strip()
choice = raw_input( "Are you sure you want to delete %s? yes/no [no]: " % section )
choice = choice.lower()
if choice in ( "yes", "y", "true" ):
if self.modificator.removeSection( section ):
self.modifiedData = True
else:
print "Can't be deleted"
else:
print "Aborting removal."
except Exception, x:
print "Error removing section, %s" % str( x )
def do_setComment( self, args ):
"""
Sets option or section's comment. Requested entry MUST exist.
    Usage: setComment <option/section> <comment>...
    From the second argument until the last one is considered the entry's comment.
"""
try:
argsList = args.split()
if len( argsList ) < 2:
print "Must specify option and value to use"
return
entryPath = argsList[0].strip()
value = " ".join( argsList[1:] ).strip()
self.modificator.setComment( entryPath, value )
self.modifiedData = True
except Exception, x:
print "Cannot insert comment: ", str( x )
def do_writeToFile( self, args ):
"""
Writes modification to file for later use.
Usage: writeToFile <filename>.cfg
Note that if a file extension is specified, it is replaced by .cfg suffix.
If not it is added automatically
"""
try:
if len( args ) == 0:
print "Filename to write must be specified!"
return
filename = args.split()[0].strip()
filename = _appendExtensionIfMissing( filename )
self.modificator.dumpToFile( filename )
except Exception, x:
print "Couldn't write to file %s: %s" % ( filename, str( x ) )
def do_readFromFile( self, args ):
"""
Reads data from filename to be used. Actual data will be replaced!
Usage: readFromFile <filename>.cfg
Note that if a file extension is specified, it is replaced by .cfg suffix.
If not it is added automatically
"""
try:
if len( args ) == 0:
print "Filename to read must be specified!"
return
filename = args.split()[0].strip()
filename = _appendExtensionIfMissing( filename )
self.modificator.loadFromFile( filename )
self.modifiedData = True
except Exception, x:
print "Couldn't read from file %s: %s" % ( filename, str( x ) )
def do_mergeFromFile( self, args ):
"""
Reads data from filename and merges it with current data.
    Data read from the file takes precedence over the current data.
Usage: mergeFromFile <filename>.cfg
Note that if a file extension is specified, it is replaced by .cfg suffix.
If not it is added automatically
"""
try:
if len( args ) == 0:
print "Filename to read must be specified!"
return
filename = args.split()[0].strip()
filename = _appendExtensionIfMissing( filename )
self.modificator.mergeFromFile( filename )
self.modifiedData = True
except Exception, x:
_showTraceback()
print "Couldn't read from file %s: %s" % ( filename, str( x ) )
def do_showData( self, dummy ):
"""
Shows the current modified configuration
Usage: showData
"""
print self.modificator
def do_showHistory( self, args ):
"""
Shows the last commit history
Usage: showHistory <update limit>
"""
try:
argsList = args.split()
limit = 100
if len( argsList ) > 0:
limit = int( argsList[0] )
history = self.modificator.getHistory( limit )
print "%s recent commits:" % limit
for entry in history:
self._printPair( entry[0], entry[1], "@" )
except:
_showTraceback()
def do_showDiffWithServer( self, dummy ):
"""
    Shows diff with the latest version on the server
Usage: showDiffWithServer
"""
try:
diffData = self.modificator.showCurrentDiff()
print "Diff with latest from server ( + local - remote )"
for line in diffData:
if line[0] in ( '-' ):
print colorize( line, "red" )
elif line[0] in ( '+' ):
print colorize( line, "green" )
elif line[0] in ( '?' ):
print colorize( line, "yellow" ),
else:
print line
except:
_showTraceback()
def do_showDiffBetweenVersions( self, args ):
"""
Shows diff between two versions
Usage: showDiffBetweenVersions <version 1 with spaces> <version 2 with spaces>
"""
try:
argsList = args.split()
if len( argsList ) < 4:
print "What are the two versions to compare?"
return
v1 = " ".join ( argsList[0:2] )
v2 = " ".join ( argsList[2:4] )
print "Comparing '%s' with '%s' " % ( v1, v2 )
diffData = self.modificator.getVersionDiff( v1, v2 )
print "Diff with latest from server ( + %s - %s )" % ( v2, v1 )
for line in diffData:
if line[0] in ( '-' ):
print colorize( line, "red" )
elif line[0] in ( '+' ):
print colorize( line, "green" )
elif line[0] in ( '?' ):
print colorize( line, "yellow" ),
else:
print line
except:
_showTraceback()
def do_rollbackToVersion( self, args ):
"""
    Rolls back to a user-selected version of the configuration
    Usage: rollbackToVersion <version with spaces>
"""
try:
argsList = args.split()
if len( argsList ) < 2:
print "What version to rollback?"
return
version = " ".join ( argsList[0:2] )
choice = raw_input( "Do you really want to rollback to version %s? yes/no [no]: " % version )
choice = choice.lower()
if choice in ( "yes", "y" ):
response = self.modificator.rollbackToVersion( version )
if response[ 'OK' ]:
self.modifiedData = False
print "Rolled back."
self.modificator.loadFromRemote()
else:
print "Error sending data, server said: %s" % response['Message']
except:
_showTraceback()
def do_mergeWithServer( self, dummy ):
"""
    Merges local modifications with the latest configuration from the server
    Usage: mergeWithServer
"""
try:
choice = raw_input( "Do you want to merge with server configuration? yes/no [no]: " )
choice = choice.lower()
if choice in ( "yes", "y" ):
retVal = self.modificator.mergeWithServer()
if retVal[ 'OK' ]:
print "Merged"
else:
print "There was an error: ", retVal[ 'Message' ]
else:
print "Merge aborted"
except:
_showTraceback()
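# Minimal usage sketch (illustrative; mirrors the class's own __init__/start flow):
#
#   cli = CSCLI() # connects to the configured master server on construction
#   cli.start() # loads the remote configuration and enters the command loop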
|
rajanandakumar/DIRAC
|
ConfigurationSystem/Client/CSCLI.py
|
Python
|
gpl-3.0
| 17,855
|
[
"DIRAC"
] |
3dcc90f4bf05e958cd4b780a15ebc27f7d46258636f8d86c57130ae9aab576b3
|
# -*- coding: utf-8 -*-
"""
Mapping among MWE, lemma form and sense candidates
"""
# This code is a part of coolisf library: https://github.com/letuananh/intsem.fx
# :copyright: (c) 2014 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
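# Usage sketch (illustrative, not part of the original mapping): recover the
# surface lemma for an ERG predicate name, e.g.
#     MWE_ERG_PRED_LEMMA.get('_flesh_v_out_rel') # -> 'flesh out'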
MWE_ERG_PRED_LEMMA = {
'_flesh_v_out_rel' : 'flesh out'
,'_flip_v_around_rel' : 'flip around'
,'_write_v_out_rel' : 'write out'
,'_hollow_v_out_rel' : 'hollow out'
,'_match_v_up_rel' : 'match up'
,'_split_v_up_rel' : 'split up'
,'_stretch_v_over_rel' : 'stretch over'
,'_put_v_forward_rel' : 'put forward'
,'_shake_v_out_rel' : 'shake out'
,'_stick_v_around_rel' : 'stick around'
,'_wheel_v_in_rel' : 'wheel in'
,'_brick_v_up_rel' : 'brick up'
,'_whip_v_up_rel' : 'whip up'
,'_play_v_back_rel' : 'play back'
,'_search_v_out_rel' : 'search out'
,'_single_v_out_rel' : 'single out'
,'_fork_v_off_rel' : 'fork off'
,'_straighten_v_out_rel' : 'straighten out'
,'_beam_v_out_rel' : 'beam out'
,'_scare_v_away_rel' : 'scare away'
,'_bottle_v_up_rel' : 'bottle up'
,'_drop_v_by_rel' : 'drop by'
,'_ship_v_back_rel' : 'ship back'
,'_turn_v_out_rel' : 'turn out expl'
,'_cast_v_off_rel' : 'cast off'
,'_put_v_off_rel' : 'put off'
,'_show_v_up_rel' : 'show up'
,'_slam_v_on_rel' : 'slam on'
,'_coil_v_up_rel' : 'coil up'
,'_round_v_out_rel' : 'round out'
,'_get_v_away-with_rel' : 'get away with'
,'_clean_v_up_rel' : 'clean up'
,'_hew_v_out_rel' : 'hew out'
,'_buy_v_back_rel' : 'buy back'
,'_end_v_up_rel' : 'end up'
,'_gasp_v_out_rel' : 'gasp out'
,'_log_v_in_rel' : 'log in'
,'_pat_v_down_rel' : 'pat down'
,'_sign_v_on_rel' : 'sign on'
,'_hitch_v_up_rel' : 'hitch up'
,'_trot_v_out_rel' : 'trot out'
,'_finish_v_up_rel' : 'finish up'
,'_haul_v_up_rel' : 'haul up'
,'_fly_v_over_rel' : 'fly over'
,'_home_v_in_rel' : 'home in'
,'_let_v_go-of_rel' : 'let go of'
,'_write_v_up_rel' : 'write up'
,'_sign_v_up_rel' : 'sign up'
,'_drive_v_off_rel' : 'drive off'
,'_bus_v_in_rel' : 'bus in'
,'_close_v_in_rel' : 'close in'
,'_mop_v_up_rel' : 'mop up'
,'_ramp_v_down_rel' : 'ramp down'
,'_haul_v_in_rel' : 'haul in'
,'_hook_v_up_rel' : 'hook up'
,'_pony_v_up_rel' : 'pony up'
,'_shape_v_up_rel' : 'shape up'
,'_chase_v_down_rel' : 'chase down'
,'_cling_v_on_rel' : 'cling on'
,'_gnaw_v_away_rel' : 'gnaw away'
,'_box_v_in_rel' : 'box in'
,'_ride_v_up_rel' : 'ride up'
,'_put_v_away_rel' : 'put away'
,'_hold_v_on_rel' : 'hold on'
,'_knock_v_up_rel' : 'knock up'
,'_melt_v_down_rel' : 'melt down'
,'_get_v_in_rel' : 'get in'
,'_hand_v_in_rel' : 'hand in'
,'_slug_v_down_rel' : 'slug down'
,'_stop_v_off_rel' : 'stop off'
,'_crack_v_open_rel' : 'crack open'
,'_show_v_off_rel' : 'show off'
,'_shoot_v_off_rel' : 'shoot off'
,'_winnow_v_out_rel' : 'winnow out'
,'_chase_v_away_rel' : 'chase away'
,'_sell_v_out_rel' : 'sell out'
,'_take_v_up_rel' : 'take up'
,'_deck_v_out_rel' : 'deck out'
,'_nail_v_up_rel' : 'nail up'
,'_note_v_down_rel' : 'note down'
,'_throw_v_down_rel' : 'throw down'
,'_flip_v_on_rel' : 'flip on'
,'_chew_v_off_rel' : 'chew off'
,'_dig_v_in_rel' : 'dig in'
,'_team_v_up_rel' : 'team up'
,'_scribble_v_down_rel' : 'scribble down'
,'_tease_v_apart_rel' : 'tease apart'
,'_throw_v_over_rel' : 'throw over'
,'_scale_v_up_rel' : 'scale up'
,'_seal_v_off_rel' : 'seal off'
,'_weigh_v_up_rel' : 'weigh up'
,'_bring_v_up_rel' : 'bring up'
,'_hold_v_up_rel' : 'hold up on'
,'_slice_v_off_rel' : 'slice off'
,'_muddle_v_along_rel' : 'muddle along'
,'_put_v_back_rel' : 'put back'
,'_grey_v_out_rel' : 'grey out'
,'_roll_v_up_rel' : 'roll up'
,'_pump_v_in_rel' : 'pump in'
,'_beef_v_up_rel' : 'beef up'
,'_wedge_v_in_rel' : 'wedge in'
,'_blast_v_off_rel' : 'blast off'
,'_bring_v_off_rel' : 'bring off'
,'_wear_v_off_rel' : 'wear off'
,'_zone_v_out_rel' : 'zone out'
,'_tune_v_in_rel' : 'tune in'
,'_wake_v_up_rel' : 'wake x up'
,'_step_v_up_rel' : 'step up'
,'_filter_v_out_rel' : 'filter out'
,'_tip_v_over_rel' : 'tip over'
,'_inch_v_up_rel' : 'inch up'
,'_puff_v_out_rel' : 'puff out'
,'_clear_v_up_rel' : 'clear up'
,'_keel_v_over_rel' : 'keel over'
,'_pin_v_up_rel' : 'pin up'
,'_try_v_out_rel' : 'try out'
,'_cool_v_down-cause_rel' : 'cool down'
,'_hold_v_out_rel' : 'hold out'
,'_pull_v_open_rel' : 'pull open'
,'_check_v_in_rel' : 'check in'
,'_scrape_v_out_rel' : 'scrape out'
,'_call_v_up_rel' : 'call up'
,'_switch_v_on_rel' : 'switch on'
,'_chip_v_in_rel' : 'chip in'
,'_slough_v_off_rel' : 'slough off'
,'_slow_v_down_rel' : 'slow down'
,'_put_v_aside_rel' : 'put aside'
,'_wrest_v_away_rel' : 'wrest away'
,'_let_v_up_rel' : 'let up'
,'_read_v_in_rel' : 'read in'
,'_hold_v_down_rel' : 'hold down'
,'_break_v_in_rel' : 'break in'
,'_bring_v_over_rel' : 'bring over'
,'_rule_v_out_rel' : 'rule out'
,'_shrug_v_off_rel' : 'shrug off'
,'_pour_v_off_rel' : 'pour off'
,'_gather_v_up_rel' : 'gather up'
,'_flick_v_on_rel' : 'flick on'
,'_shake_v_off_rel' : 'shake off'
,'_kick_v_out_rel' : 'kick out'
,'_drag_v_down_rel' : 'drag down'
,'_whistle_v_up_rel' : 'whistle up'
,'_sober_v_up_rel' : 'sober up'
,'_move_v_on_rel' : 'move on'
,'_shop_v_around_rel' : 'shop around'
,'_rough_v_out_rel' : 'rough out'
,'_carry_v_away_rel' : 'carry away'
,'_curtain_v_off_rel' : 'curtain off'
,'_stamp_v_out_rel' : 'stamp out'
,'_bring_v_home_rel' : 'bring home'
,'_fish_v_out_rel' : 'fish out'
,'_get_v_out_rel' : 'get out'
,'_glue_v_on_rel' : 'glue on'
,'_log_v_out_rel' : 'log out'
,'_look_v_over_rel' : 'look over'
,'_rent_v_out_rel' : 'rent out'
,'_slap_v_down_rel' : 'slap down'
,'_settle_v_down_rel' : 'settle down'
,'_pop_v_down_rel' : 'pop down'
,'_chatter_v_on_rel' : 'chatter on'
,'_yank_v_out_rel' : 'yank out'
,'_vomit_v_up_rel' : 'vomit up'
,'_hand_v_out_rel' : 'hand out'
,'_drink_v_down_rel' : 'drink down'
,'_rocket_v_up_rel' : 'rocket up'
,'_nod_v_off_rel' : 'nod off'
,'_toss_v_away_rel' : 'toss away'
,'_hunt_v_up_rel' : 'hunt up'
,'_stop_v_over_rel' : 'stop over'
,'_scale_v_down_rel' : 'scale down'
,'_shove_v_in_rel' : 'shove in'
,'_bring_v_forth_rel' : 'bring forth'
,'_let_v_out_rel' : 'let out'
,'_write_v_off_rel' : 'write off'
,'_perk_v_up_rel' : 'perk up'
,'_buff_v_up_rel' : 'buff up'
,'_ramp_v_up_rel' : 'ramp up'
,'_damp_v_down_rel' : 'damp down'
,'_average_v_out_rel' : 'average out'
,'_hit_v_up_rel' : 'hit up'
,'_do_v_away-with_rel' : 'do away'
,'_come_v_on_rel' : 'come on'
,'_keep_v_on_rel' : 'keep'
,'_wander_v_off_rel' : 'wander off'
,'_shut_v_out_rel' : 'shut out'
,'_spin_v_off_rel' : 'spin off'
,'_leave_v_over_rel' : 'leave over'
,'_auction_v_off_rel' : 'auction off'
,'_hang_v_about_rel' : 'hang about'
,'_lay_v_down_rel' : 'lay down'
,'_stink_v_up_rel' : 'stink up'
,'_look_v_back-at_rel' : 'look back at'
,'_tack_v_on_rel' : 'tack on'
,'_firm_v_up_rel' : 'firm up'
,'_chip_v_away_rel' : 'chip away'
,'_shoo_v_in_rel' : 'shoo in'
,'_strike_v_up_rel' : 'strike up'
,'_ham_v_up_rel' : 'ham up'
,'_dish_v_out_rel' : 'dish out'
,'_shore_v_up_rel' : 'shore up'
,'_mask_v_out_rel' : 'mask out'
,'_plug_v_in_rel' : 'plug in'
,'_pull_v_on_rel' : 'pull on'
,'_stir_v_up_rel' : 'stir up'
,'_sweat_v_out_rel' : 'sweat out'
,'_pass_v_on_rel' : 'pass on'
,'_make_v_up-of_rel' : 'make up'
,'_peel_v_away_rel' : 'peel away'
,'_copy_v_out_rel' : 'copy out'
,'_go_v_along_rel' : 'go along with'
,'_tail_v_off_rel' : 'tail off'
,'_hole_v_up_rel' : 'hole up'
,'_quiet_v_down_rel' : 'quiet down'
,'_heat_v_up-cause_rel' : 'heat up'
,'_drag_v_on_rel' : 'drag on'
,'_head_v_out_rel' : 'head out'
,'_look_v_forward-to_rel' : 'look forward to'
,'_push_v_away_rel' : 'push away'
,'_boot_v_up_rel' : 'boot up'
,'_set_v_about_rel' : 'set about'
,'_spin_v_out_rel' : 'spin out'
,'_spit_v_out_rel' : 'spit out'
,'_lag_v_behind_rel' : 'lag behind'
,'_weigh_v_down_rel' : 'weigh down'
,'_eke_v_out_rel' : 'eke out'
,'_rub_v_out_rel' : 'rub out'
,'_seal_v_in_rel' : 'seal in'
,'_stave_v_off_rel' : 'stave off'
,'_get_v_around_rel' : 'get around'
,'_call_v_forth_rel' : 'call forth'
,'_cash_v_in_rel' : 'cash in'
,'_offer_v_up_rel' : 'offer up'
,'_play_v_up_rel' : 'play up'
,'_hide_v_out_rel' : 'hide out'
,'_hand_v_down_rel' : 'hand down'
,'_stress_v_out_rel' : 'stress out'
,'_tack_v_down_rel' : 'tack down'
,'_stir_v_in_rel' : 'stir in'
,'_live_v_up_rel' : 'live up'
,'_take_v_x-off_rel' : 'take off'
,'_let_v_down_rel' : 'let down'
,'_smuggle_v_in_rel' : 'smuggle in'
,'_fly_v_off_rel' : 'fly off'
,'_stack_v_up_rel' : 'stack up'
,'_winch_v_up_rel' : 'winch up'
,'_squeeze_v_by_rel' : 'squeeze by'
,'_shoo_v_out_rel' : 'shoo out'
,'_keep_v_out_rel' : 'keep out'
,'_eat_v_in_rel' : 'eat in'
,'_leave_v_off_rel' : 'leave off'
,'_drive_v_around_rel' : 'drive around'
,'_hang_v_out_rel' : 'hang out'
,'_let_v_on_rel' : 'let on'
,'_pack_v_in_rel' : 'pack in'
,'_give_v_up_rel' : 'give up'
,'_knock_v_off_rel' : 'knock off'
,'_make_v_up_rel' : 'make up'
,'_root_v_out_rel' : 'root out'
,'_toss_v_aside_rel' : 'toss aside'
,'_load_v_up_rel' : 'load up'
,'_do_v_up_rel' : 'do up'
,'_build_v_up_rel' : 'build up'
,'_lift_v_off_rel' : 'lift off'
,'_soak_v_off_rel' : 'soak off'
,'_gin_v_up_rel' : 'gin up'
,'_look_v_up-to_rel' : 'look up to'
,'_wander_v_up_rel' : 'wander up'
,'_well_v_up_rel' : 'well up'
,'_set_v_in_rel' : 'set in'
,'_call_v_off_rel' : 'call off'
,'_skip_v_out_rel' : 'skip out'
,'_duck_v_out_rel' : 'duck out'
,'_get_v_through_rel' : 'get through'
,'_pull_v_out-of_rel' : 'pull out of'
,'_cross_v_off_rel' : 'cross off'
,'_hunt_v_out_rel' : 'hunt out'
,'_set_v_out-aim_rel' : 'set out'
,'_come_v_along_rel' : 'come along'
,'_divide_v_up_rel' : 'divide up'
,'_bring_v_about_rel' : 'bring about'
,'_blow_v_away_rel' : 'blow away'
,'_bottom_v_out_rel' : 'bottom out'
,'_gut_v_out_rel' : 'gut out'
,'_cut_v_up_rel' : 'cut up'
,'_shout_v_out_rel' : 'shout out'
,'_rough_v_in_rel' : 'rough in'
,'_ease_v_up_rel' : 'ease up'
,'_put_v_forth_rel' : 'put forth'
,'_rest_v_up_rel' : 'rest up'
,'_square_v_away_rel' : 'square away'
,'_type_v_up_rel' : 'type up'
,'_bum_v_around_rel' : 'bum around'
,'_chop_v_up_rel' : 'chop up'
,'_tune_v_up_rel' : 'tune up'
,'_free_v_up_rel' : 'free'
,'_order_v_up_rel' : 'order up'
,'_crowd_v_out_rel' : 'crowd out'
,'_ask_v_off_rel' : 'ask off'
,'_draft_v_in_rel' : 'draft in'
,'_mail_v_in_rel' : 'mail in'
,'_put_v_down_rel' : 'put down'
,'_slam_v_down_rel' : 'slam down'
,'_saddle_v_up_rel' : 'saddle up'
,'_scrape_v_away_rel' : 'scrape away'
,'_push_v_down_rel' : 'push down'
,'_tear_v_off_rel' : 'tear off'
,'_die_v_away_rel' : 'die away'
,'_call_v_back_rel' : 'call back'
,'_smooth_v_over_rel' : 'smooth over'
,'_flip_v_down_rel' : 'flip down'
,'_want_v_back_rel' : 'want back'
,'_sponge_v_off-of_rel' : 'sponge off of'
,'_read_v_off_rel' : 'read off'
,'_zero_v_in-on_rel' : 'zero in'
,'_pucker_v_up_rel' : 'pucker up'
,'_screen_v_out_rel' : 'screen out'
,'_shove_v_through_rel' : 'shove through'
,'_kick_v_around_rel' : 'kick around'
,'_cut_v_out_rel' : 'cut out'
,'_crack_v_down_rel' : 'crack down'
,'_sniff_v_out_rel' : 'sniff out'
,'_clean_v_out_rel' : 'clean out'
,'_play_v_down_rel' : 'play down'
,'_sort_v_out_rel' : 'sort out'
,'_slim_v_down_rel' : 'slim down'
,'_die_v_off_rel' : 'die off'
,'_stay_v_over_rel' : 'stay over'
,'_whisk_v_away_rel' : 'whisk away'
,'_trim_v_away_rel' : 'trim away'
,'_clip_v_off_rel' : 'clip off'
,'_code_v_up_rel' : 'code up'
,'_throw_v_open_rel' : 'throw open'
,'_hold_v_still_rel' : 'hold still'
,'_rat_v_out_rel' : 'rat out'
,'_fire_v_up_rel' : 'fire'
,'_shoot_v_down_rel' : 'shoot down'
,'_sleep_v_off_rel' : 'sleep off'
,'_spur_v_on_rel' : 'spur on'
,'_run_v_up_rel' : 'run up'
,'_trim_v_off_rel' : 'trim off'
,'_point_v_up_rel' : 'point up'
,'_boil_v_over_rel' : 'boil over'
,'_break_v_open_rel' : 'break open'
,'_copy_v_down_rel' : 'copy down'
,'_branch_v_out_rel' : 'branch out'
,'_fog_v_up_rel' : 'fog up'
,'_gun_v_down_rel' : 'gun down'
,'_line_v_up_rel' : 'line up'
,'_bow_v_down_rel' : 'bow down'
,'_bail_v_out_rel' : 'bail out'
,'_laugh_v_off_rel' : 'laugh off'
,'_rip_v_up_rel' : 'rip up'
,'_carry_v_out_rel' : 'carry out'
,'_come_v_together_rel' : 'come together'
,'_ground_v_out_rel' : 'ground out'
,'_find_v_out-about_rel' : 'find out'
,'_render_v_up_rel' : 'render up'
,'_simmer_v_down_rel' : 'simmer down'
,'_start_v_off_rel' : 'start off'
,'_stick_v_up-for_rel' : 'stick up for'
,'_trump_v_up_rel' : 'trump up'
,'_fall_v_back_rel' : 'fall back'
,'_march_v_off_rel' : 'march off'
,'_stop_v_in_rel' : 'stop in'
,'_pull_v_out_rel' : 'pull out'
,'_wipe_v_off_rel' : 'wipe off'
,'_draw_v_down_rel' : 'draw down'
,'_mail_v_out_rel' : 'mail out'
,'_keep_v_up_rel' : 'keep up with'
,'_shell_v_out_rel' : 'shell out'
,'_back_v_out_rel' : 'back out'
,'_find_v_out_rel' : 'find out'
,'_spurt_v_out_rel' : 'spurt out'
,'_string_v_on_rel' : 'string'
,'_weigh_v_in_rel' : 'weigh in'
,'_move_v_about_rel' : 'move about'
,'_board_v_up_rel' : 'board up'
,'_tease_v_open_rel' : 'tease open'
,'_top_v_off_rel' : 'top off'
,'_pull_v_off_rel' : 'pull off'
,'_schlep_v_around_rel' : 'schlep around'
,'_swear_v_in_rel' : 'swear in'
,'_take_v_apart_rel' : 'take apart'
,'_cut_v_short_rel' : 'cut short'
,'_turn_v_out_rel' : 'turn out'
,'_spill_v_out_rel' : 'spill out'
,'_swing_v_out_rel' : 'swing out'
,'_think_v_up_rel' : 'think up'
,'_turn_v_around_rel' : 'turn around'
,'_fork_v_over_rel' : 'fork over'
,'_look_v_up-dir_rel' : 'look up'
,'_patch_v_up_rel' : 'patch up'
,'_winch_v_in_rel' : 'winch in'
,'_bow_v_out_rel' : 'bow out'
,'_chat_v_up_rel' : 'chat up'
,'_add_v_up_rel' : 'add up'
,'_define_v_away_rel' : 'define away'
,'_pack_v_up_rel' : 'pack up'
,'_call_v_down_rel' : 'call down'
,'_come_v_through_rel' : 'come through'
,'_flip_v_off_rel' : 'flip off'
,'_hold_v_up_rel' : 'hold up'
,'_rinse_v_out_rel' : 'rinse out'
,'_key_v_in_rel' : 'key in'
,'_break_v_away_rel' : 'break away'
,'_set_v_out_rel' : 'set out'
,'_show_v_through_rel' : 'show through'
,'_hide_v_away_rel' : 'hide away'
,'_pile_v_on_rel' : 'pile on'
,'_blow_v_out_rel' : 'blow out'
,'_sound_v_off_rel' : 'sound off'
,'_lock_v_out_rel' : 'lock out'
,'_stick_v_on_rel' : 'stick on'
,'_get_v_on_rel' : 'get on'
,'_burn_v_up_rel' : 'burn up'
,'_mush_v_together_rel' : 'mush together'
,'_live_v_down_rel' : 'live down'
,'_piece_v_together_rel' : 'piece together'
,'_live_v_out_rel' : 'live out'
,'_stand_v_in-for_rel' : 'stand in for'
,'_speed_v_up_rel' : 'speed up'
,'_stiffen_v_up_rel' : 'stiffen up'
,'_chalk_v_up_rel' : 'chalk up'
,'_rub_v_in_rel' : 'rub in'
,'_poke_v_out_rel' : 'poke out'
,'_soup_v_up_rel' : 'soup up'
,'_seal_v_out_rel' : 'seal out'
,'_rush_v_through_rel' : 'rush through'
,'_sit_v_out_rel' : 'sit out'
,'_shine_v_out_rel' : 'shine out'
,'_stick_v_out_rel' : 'stick out'
,'_fix_v_up_rel' : 'fix up'
,'_come_v_back_rel' : 'come back'
,'_get_v_up_rel' : 'get up'
,'_rush_v_out_rel' : 'rush out'
,'_heave_v_out_rel' : 'heave out'
,'_belt_v_out_rel' : 'belt out'
,'_leave_v_home_rel' : 'leave home'
,'_shut_v_up_rel' : 'shut up'
,'_get_v_down_rel' : 'get down'
,'_plunk_v_down_rel' : 'plunk down'
,'_push_v_in_rel' : 'push in'
,'_clip_v_on_rel' : 'clip on'
,'_ferry_v_in_rel' : 'ferry in'
,'_help_v_out_rel' : 'help out'
,'_spirit_v_away_rel' : 'spirit away'
,'_stand_v_out_rel' : 'stand out'
,'_swing_v_around_rel' : 'swing around'
,'_bust_v_open_rel' : 'bust open'
,'_bandage_v_up_rel' : 'bandage up'
,'_turn_v_back_rel' : 'turn back'
,'_reel_v_off_rel' : 'reel off'
,'_frame_v_in_rel' : 'frame in'
,'_sleep_v_in_rel' : 'sleep in'
,'_scar_v_up_rel' : 'scar up'
,'_haul_v_off_rel' : 'haul off'
,'_stash_v_away_rel' : 'stash away'
,'_pour_v_in_rel' : 'pour in'
,'_brighten_v_up_rel' : 'brighten up'
,'_shove_v_out_rel' : 'shove out'
,'_spill_v_over_rel' : 'spill over'
,'_shack_v_up-with_rel' : 'shack up'
,'_wake_v_up_rel' : 'wake up'
,'_factor_v_out_rel' : 'factor out'
,'_throw_v_out_rel' : 'throw out'
,'_lay_v_off_rel' : 'lay off'
,'_figure_v_out_rel' : 'figure out'
,'_chew_v_out_rel' : 'chew out'
,'_squeeze_v_out_rel' : 'squeeze out'
,'_pull_v_across_rel' : 'pull across'
,'_button_v_up_rel' : 'button up'
,'_boil_v_down-to_rel' : 'boil down'
,'_fill_v_up_rel' : 'fill up'
,'_let_v_in_rel' : 'let in'
,'_go_v_away_rel' : 'go away'
,'_paste_v_on_rel' : 'paste on'
,'_usher_v_in_rel' : 'usher in'
,'_jet_v_off_rel' : 'jet off'
,'_snow_v_under_rel' : 'snow under'
,'_act_v_up_rel' : 'act up'
,'_rack_v_up_rel' : 'rack up'
,'_mark_v_off_rel' : 'mark off'
,'_beg_v_off_rel' : 'beg off'
,'_plant_v_out_rel' : 'plant out'
,'_butt_v_in_rel' : 'butt in'
,'_thrash_v_out_rel' : 'thrash out'
,'_blur_v_out_rel' : 'blur out'
,'_choke_v_up_rel' : 'choke up'
,'_nudge_v_up_rel' : 'nudge up'
,'_stand_v_in_rel' : 'stand in'
,'_enter_v_in_rel' : 'enter in'
,'_fly_v_on_rel' : 'fly on'
,'_burn_v_off_rel' : 'burn off'
,'_sum_v_up_rel' : 'sum up'
,'_drag_v_in_rel' : 'drag in'
,'_walk_v_over_rel' : 'walk over'
,'_stamp_v_on_rel' : 'stamp on'
,'_ride_v_in_rel' : 'ride in'
,'_jam_v_up_rel' : 'jam up'
,'_write_v_down_rel' : 'write down'
,'_pare_v_down_rel' : 'pare down'
,'_buy_v_up_rel' : 'buy up'
,'_give_v_off_rel' : 'give off'
,'_pluck_v_out_rel' : 'pluck out'
,'_rip_v_off_rel' : 'rip off'
,'_scout_v_out_rel' : 'scout out'
,'_drive_v_in_rel' : 'drive in'
,'_trail_v_off_rel' : 'trail off'
,'_choke_v_off_rel' : 'choke off'
,'_go_v_on_rel' : 'go on'
,'_snap_v_up_rel' : 'snap up'
,'_deal_v_out_rel' : 'deal out'
,'_doll_v_up_rel' : 'doll up'
,'_follow_v_around_rel' : 'follow around'
,'_clog_v_up_rel' : 'clog up'
,'_pick_v_out_rel' : 'pick out'
,'_face_v_up_rel' : 'face up'
,'_push_v_forward_rel' : 'push forward'
,'_go_v_through_rel' : 'go through'
,'_screw_v_up_rel' : 'screw up'
,'_sweat_v_off_rel' : 'sweat off'
,'_kick_v_in_rel' : 'kick in'
,'_tie_v_up_rel' : 'tie up'
,'_draw_v_up_rel' : 'draw up'
,'_come_v_around_rel' : 'come around'
,'_flush_v_out_rel' : 'flush out'
,'_head_v_off_rel' : 'head off'
,'_laugh_v_away_rel' : 'laugh away'
,'_puff_v_up_rel' : 'puff up'
,'_shoot_v_up_rel' : 'shoot up'
,'_throw_v_away_rel' : 'throw away'
,'_strip_v_away_rel' : 'strip away'
,'_kick_v_off_rel' : 'kick off'
,'_move_v_out_rel' : 'move out'
,'_pop_v_off_rel' : 'pop off'
,'_work_v_in_rel' : 'work in'
,'_pull_v_down_rel' : 'pull down'
,'_button_v_down_rel' : 'button down'
,'_shut_v_down_rel' : 'shut down'
,'_touch_v_down_rel' : 'touch down'
,'_eat_v_out_rel' : 'eat out'
,'_keep_v_on_rel' : 'keep on'
,'_scrape_v_together_rel' : 'scrape together'
,'_look_v_in_rel' : 'look in on'
,'_date_v_back_rel' : 'date back'
,'_hot_v_up_rel' : 'hot up'
,'_stub_v_out_rel' : 'stub out'
,'_stand_v_up-for_rel' : 'stand up for'
,'_trade_v_in_rel' : 'trade in'
,'_scarf_v_down_rel' : 'scarf down'
,'_write_v_in_rel' : 'write in'
,'_give_v_back_rel' : 'give back'
,'_tangle_v_up_rel' : 'tangle up'
,'_start_v_over_rel' : 'start over'
,'_turn_v_up_rel' : 'turn up'
,'_power_v_down_rel' : 'power down'
,'_push_v_back_rel' : 'push back'
,'_bulk_v_up_rel' : 'bulk up'
,'_bring_v_out_rel' : 'bring out'
,'_book_v_up_rel' : 'book up'
,'_conjure_v_up_rel' : 'conjure up'
,'_hang_v_around_rel' : 'hang around'
,'_hurry_v_up_rel' : 'hurry up'
,'_last_v_out_rel' : 'last out'
,'_swallow_v_up_rel' : 'swallow up'
,'_flick_v_off_rel' : 'flick off'
,'_ward_v_off_rel' : 'ward off'
,'_scrape_v_up_rel' : 'scrape up'
,'_close_v_out_rel' : 'close out'
,'_pitch_v_in_rel' : 'pitch in'
,'_lash_v_out_rel' : 'lash out'
,'_mess_v_up_rel' : 'mess up'
,'_scoop_v_out_rel' : 'scoop out'
,'_tighten_v_up_rel' : 'tighten up'
,'_win_v_over_rel' : 'win over'
,'_get_v_back_rel' : 'get back'
,'_knock_v_over_rel' : 'knock over'
,'_set_v_forth_rel' : 'set forth'
,'_soften_v_up_rel' : 'soften up'
,'_tone_v_down_rel' : 'tone down'
,'_keep_v_down_rel' : 'keep down'
,'_wear_v_away_rel' : 'wear away'
,'_dry_v_out_rel' : 'dry out'
,'_keep_v_up_rel' : 'keep up'
,'_come_v_out_rel' : 'come out with'
,'_lose_v_out_rel' : 'lose out on'
,'_pull_v_up_rel' : 'pull up'
,'_bawl_v_out_rel' : 'bawl out'
,'_ride_v_out_rel' : 'ride out'
,'_dam_v_up_rel' : 'dam up'
,'_drop_v_out_rel' : 'drop out'
,'_get_v_around-to_rel' : 'get around'
,'_knock_v_down_rel' : 'knock down'
,'_blot_v_out_rel' : 'blot out'
,'_blast_v_away_rel' : 'blast away'
,'_lay_v_out_rel' : 'lay out'
,'_do_v_so_rel' : 'do so'
,'_block_v_off_rel' : 'block off'
,'_lift_v_out_rel' : 'lift out'
,'_crop_v_up_rel' : 'crop up'
,'_bring_v_in_rel' : 'bring in'
,'_pipe_v_down_rel' : 'pipe down'
,'_snap_v_in_rel' : 'snap in'
,'_break_v_through_rel' : 'break through'
,'_head_v_on_rel' : 'head on'
,'_comb_v_out_rel' : 'comb out'
,'_start_v_out_rel' : 'start out'
,'_thrust_v_out_rel' : 'thrust out'
,'_wall_v_in_rel' : 'wall in'
,'_sew_v_up_rel' : 'sew up'
,'_ask_v_out_rel' : 'ask out'
,'_line_v_out_rel' : 'line out'
,'_sand_v_off_rel' : 'sand off'
,'_whip_v_off_rel' : 'whip off'
,'_prop_v_up_rel' : 'prop up'
,'_opt_v_out_rel' : 'opt out'
,'_suck_v_in_rel' : 'suck in'
,'_hunch_v_up_rel' : 'hunch up'
,'_tear_v_up_rel' : 'tear up'
,'_amp_v_up_rel' : 'amp up'
,'_horn_v_in_rel' : 'horn in on'
,'_coop_v_up_rel' : 'coop up'
,'_pay_v_down_rel' : 'pay down'
,'_raise_v_up_rel' : 'raise up'
,'_rave_v_on_rel' : 'rave on'
,'_push_v_through_rel' : 'push through'
,'_fit_v_in_rel' : 'fit in with'
,'_gouge_v_out_rel' : 'gouge out'
,'_root_v_around_rel' : 'root around'
,'_slip_v_in_rel' : 'slip in'
,'_get_v_off_rel' : 'get off'
,'_stare_v_down_rel' : 'stare down'
,'_clamp_v_down_rel' : 'clamp down'
,'_open_v_up_rel' : 'open up'
,'_pipe_v_in_rel' : 'pipe in'
,'_hack_v_up_rel' : 'hack up'
,'_leave_v_open_rel' : 'leave open'
,'_come_v_in_rel' : 'come in'
,'_yield_v_up_rel' : 'yield up'
,'_box_v_up_rel' : 'box up'
,'_come_v_about_rel' : 'come about'
,'_sock_v_in_rel' : 'sock in'
,'_tee_v_off_rel' : 'tee off'
,'_play_v_out_rel' : 'play out'
,'_sweep_v_away_rel' : 'sweep away'
,'_rabbit_v_on_rel' : 'rabbit on'
,'_drive_v_up_rel' : 'drive up'
,'_toss_v_in_rel' : 'toss in'
,'_scoop_v_up_rel' : 'scoop up'
,'_take_v_home_rel' : 'take home'
,'_think_v_through_rel' : 'think through'
,'_catch_v_on_rel' : 'catch on'
,'_short_v_out_rel' : 'short out'
,'_gulp_v_down_rel' : 'gulp down'
,'_credit_v_back_rel' : 'credit back'
,'_back_v_down_rel' : 'back down'
,'_churn_v_out_rel' : 'churn out'
,'_cover_v_over_rel' : 'cover over'
,'_dredge_v_up_rel' : 'dredge up'
,'_gouge_v_away_rel' : 'gouge away'
,'_ball_v_up_rel' : 'ball up'
,'_lead_v_out_rel' : 'lead out'
,'_square_v_up_rel' : 'square up'
,'_parachute_v_in_rel' : 'parachute in'
,'_give_v_away_rel' : 'give away'
,'_rust_v_away_rel' : 'rust away'
,'_fill_v_out_rel' : 'fill out'
,'_throw_v_up_rel' : 'throw up'
,'_serve_v_up_rel' : 'serve up'
,'_trip_v_up_rel' : 'trip up'
,'_bowl_v_over_rel' : 'bowl over'
,'_check_v_out_rel' : 'check out'
,'_grease_v_up_rel' : 'grease up'
,'_fuck_v_off_rel' : 'fuck off'
,'_wipe_v_up_rel' : 'wipe up'
,'_brush_v_off_rel' : 'brush off'
,'_puzzle_v_out_rel' : 'puzzle out'
,'_isolate_v_out_rel' : 'isolate out'
,'_clear_v_out_rel' : 'clear out'
,'_shut_v_off_rel' : 'shut off'
,'_summon_v_up_rel' : 'summon up'
,'_leave_v_on_rel' : 'leave on'
,'_let_v_off_rel' : 'let off'
,'_spark_v_off_rel' : 'spark off'
,'_wear_v_on_rel' : 'wear on'
,'_scrape_v_off_rel' : 'scrape off'
,'_burst_v_open_rel' : 'burst open'
,'_pick_v_up_rel' : 'pick up'
,'_pick_v_off_rel' : 'pick off'
,'_lock_v_away_rel' : 'lock away'
,'_jump_v_up_rel' : 'jump up'
,'_add_v_in_rel' : 'add in'
,'_drop_v_in_rel' : 'drop in'
,'_haul_v_out_rel' : 'haul out'
,'_leave_v_out_rel' : 'leave out'
,'_lift_v_away_rel' : 'lift away'
,'_ration_v_out_rel' : 'ration out'
,'_take_v_in_rel' : 'take in'
,'_like_v_back_rel' : 'like back'
,'_paste_v_in_rel' : 'paste in'
,'_rein_v_in_rel' : 'rein in'
,'_calm_v_down_rel' : 'calm down'
,'_toss_v_out_rel' : 'toss out'
,'_turn_v_off_rel' : 'turn off'
,'_cool_v_down_rel' : 'cool down'
,'_rig_v_up_rel' : 'rig up'
,'_run_v_out-of_rel' : 'run out'
,'_drop_v_off_rel' : 'drop off'
,'_leave_v_in_rel' : 'leave in'
,'_match_v_up_rel' : 'match up with'
,'_close_v_up_rel' : 'close up'
,'_wind_v_down_rel' : 'wind down'
,'_pin_v_on_rel' : 'pin on'
,'_strip_v_off_rel' : 'strip off'
,'_tick_v_off_rel' : 'tick off'
,'_scrunch_v_up_rel' : 'scrunch up'
,'_sweep_v_up_rel' : 'sweep up'
,'_glaze_v_over_rel' : 'glaze over'
,'_max_v_out_rel' : 'max out'
,'_step_v_out_rel' : 'step out'
,'_bomb_v_out_rel' : 'bomb out'
,'_bog_v_down_rel' : 'bog down'
,'_suck_v_up_rel' : 'suck up'
,'_tell_v_off_rel' : 'tell off'
,'_send_v_back_rel' : 'send back'
,'_clown_v_around_rel' : 'clown around'
,'_muddle_v_up_rel' : 'muddle up'
,'_wash_v_away_rel' : 'wash away'
,'_nose_v_around_rel' : 'nose around'
,'_pull_v_back_rel' : 'pull back'
,'_send_v_around_rel' : 'send around'
,'_run_v_over_rel' : 'run over'
,'_clock_v_in_rel' : 'clock in'
,'_soap_v_up_rel' : 'soap up'
,'_chase_v_up_rel' : 'chase up'
,'_lay_v_up_rel' : 'lay up'
,'_swallow_v_down_rel' : 'swallow down'
,'_strike_v_out_rel' : 'strike out'
,'_tease_v_out_rel' : 'tease out'
,'_want_v_out-of_rel' : 'want out of'
,'_turn_v_over_rel' : 'turn over'
,'_warm_v_up_rel' : 'warm up'
,'_make_v_up-for_rel' : 'make up for'
,'_dress_v_down_rel' : 'dress down'
,'_dry_v_off_rel' : 'dry off'
,'_clip_v_out_rel' : 'clip out'
,'_drive_v_down_rel' : 'drive down'
,'_tap_v_off_rel' : 'tap off'
,'_toss_v_down_rel' : 'toss down'
,'_get_v_on_rel' : 'get on with'
,'_lop_v_off_rel' : 'lop off'
,'_hang_v_on_rel' : 'hang on'
,'_sack_v_out_rel' : 'sack out'
,'_charge_v_off_rel' : 'charge off'
,'_pay_v_off_rel' : 'pay off'
,'_fence_v_in_rel' : 'fence in'
,'_marry_v_off_rel' : 'marry off'
,'_die_v_down_rel' : 'die down'
,'_use_v_up_rel' : 'use up'
,'_queue_v_up_rel' : 'queue up'
,'_follow_v_up_rel' : 'follow up'
,'_rub_v_off_rel' : 'rub off'
,'_ask_v_in_rel' : 'ask in'
,'_smash_v_in_rel' : 'smash in'
,'_snuggle_v_down_rel' : 'snuggle down'
,'_sound_v_out_rel' : 'sound out'
,'_divide_v_off_rel' : 'divide off'
,'_catch_v_up_rel' : 'catch up in'
,'_look_v_on_rel' : 'look on'
,'_rust_v_out_rel' : 'rust out'
,'_talk_v_up_rel' : 'talk up'
,'_stick_v_up_rel' : 'stick up'
,'_scratch_v_up_rel' : 'scratch'
,'_phase_v_out_rel' : 'phase out'
,'_wear_v_out_rel' : 'wear out'
,'_kill_v_off_rel' : 'kill off'
,'_band_v_together_rel' : 'band together'
,'_mull_v_over_rel' : 'mull over'
,'_steal_v_away_rel' : 'steal away'
,'_hunt_v_down_rel' : 'hunt down'
,'_blow_v_down_rel' : 'blow down'
,'_cancel_v_out_rel' : 'cancel out'
,'_finish_v_off_rel' : 'finish off'
,'_shake_v_up_rel' : 'shake up'
,'_shriek_v_out_rel' : 'shriek out'
,'_boot_v_out_rel' : 'boot out'
,'_break_v_up_rel' : 'break up'
,'_take_v_back_rel' : 'take back'
,'_tear_v_apart_rel' : 'tear apart'
,'_ask_v_up_rel' : 'ask up'
,'_cart_v_away_rel' : 'cart away'
,'_branch_v_off_rel' : 'branch off'
,'_hold_v_off_rel' : 'hold off'
,'_turn_v_down_rel' : 'turn down'
,'_bring_v_forward_rel' : 'bring forward'
,'_spruce_v_up_rel' : 'spruce up'
,'_trace_v_back_rel' : 'trace back'
,'_palm_v_off_rel' : 'palm off'
,'_come_v_across_rel' : 'come across'
,'_turn_v_on_rel' : 'turn on'
,'_sign_v_off_rel' : 'sign off'
,'_knot_v_up_rel' : 'knot up'
,'_fritter_v_away_rel' : 'fritter away'
,'_toss_v_off_rel' : 'toss off'
,'_bottle_v_in_rel' : 'bottle in'
,'_chop_v_down_rel' : 'chop down'
,'_pour_v_down_rel' : 'pour down'
,'_match_v_up_rel' : 'match up to'
,'_hoist_v_up_rel' : 'hoist up'
,'_wheel_v_out_rel' : 'wheel out'
,'_carve_v_out_rel' : 'carve out'
,'_slack_v_off_rel' : 'slack off'
,'_ease_v_out_rel' : 'ease out'
,'_straighten_v_up_rel' : 'straighten up'
,'_pull_v_away_rel' : 'pull away'
,'_cough_v_up_rel' : 'cough up'
,'_type_v_in_rel' : 'type in'
,'_act_v_out_rel' : 'act out'
,'_bite_v_out_rel' : 'bite out'
,'_shovel_v_out_rel' : 'shovel out'
,'_suck_v_out_rel' : 'suck out'
,'_hammer_v_out_rel' : 'hammer out'
,'_toss_v_up_rel' : 'toss up'
,'_drive_v_home_rel' : 'drive home'
,'_black_v_out_rel' : 'black out'
,'_draw_v_away_rel' : 'draw away'
,'_have_v_yet_rel' : 'have yet'
,'_brush_v_away_rel' : 'brush away'
,'_cook_v_up_rel' : 'cook up'
,'_do_v_in_rel' : 'do in'
,'_pass_v_off_rel' : 'pass off'
,'_slice_v_out_rel' : 'slice out'
,'_drive_v_out_rel' : 'drive out'
,'_slap_v_on_rel' : 'slap on'
,'_break_v_off_rel' : 'break off'
,'_burn_v_out_rel' : 'burn out'
,'_catch_v_up_rel' : 'catch up'
,'_gear_v_up_rel' : 'gear up'
,'_flatten_v_out_rel' : 'flatten out'
,'_bubble_v_over_rel' : 'bubble over'
,'_drum_v_up_rel' : 'drum up'
,'_pin_v_down_rel' : 'pin down to'
,'_tuck_v_in_rel' : 'tuck in'
,'_jack_v_up_rel' : 'jack up'
,'_carry_v_on_rel' : 'carry on'
,'_towel_v_off_rel' : 'towel off'
,'_cram_v_in_rel' : 'cram in'
,'_type_v_out_rel' : 'type out'
,'_wind_v_up_rel' : 'wind up'
,'_drive_v_away_rel' : 'drive away'
,'_press_v_on_rel' : 'press on'
,'_charge_v_up_rel' : 'charge up'
,'_dump_v_out_rel' : 'dump out'
,'_separate_v_out_rel' : 'separate out'
,'_sop_v_up_rel' : 'sop up'
,'_tense_v_up_rel' : 'tense up'
,'_help_v_along_rel' : 'help along'
,'_crank_v_out_rel' : 'crank out'
,'_fess_v_up_rel' : 'fess up'
,'_pass_v_along_rel' : 'pass along'
,'_give_v_out_rel' : 'give out'
,'_pull_v_over_rel' : 'pull over'
,'_put_v_together-of_rel' : 'put together'
,'_sell_v_off_rel' : 'sell off'
,'_rope_v_together_rel' : 'rope together'
,'_shy_v_away_rel' : 'shy away'
,'_size_v_up_rel' : 'size up'
,'_slice_v_up_rel' : 'slice up'
,'_take_v_along_rel' : 'take along'
,'_vote_v_down_rel' : 'vote down'
,'_map_v_out_rel' : 'map out'
,'_move_v_up_rel' : 'move up'
,'_bed_v_down_rel' : 'bed down'
,'_smuggle_v_out_rel' : 'smuggle out'
,'_snuff_v_out_rel' : 'snuff out'
,'_back_v_off_rel' : 'back off'
,'_cool_v_off_rel' : 'cool off'
,'_draw_v_forth_rel' : 'draw forth'
,'_fumble_v_around_rel' : 'fumble around'
,'_shop_v_around-for_rel' : 'shop around for'
,'_fill_v_in_rel' : 'fill in'
,'_join_v_in_rel' : 'join in'
,'_slow_v_up_rel' : 'slow up'
,'_clear_v_off_rel' : 'clear off'
,'_die_v_out_rel' : 'die out'
,'_muddle_v_through_rel' : 'muddle through'
,'_level_v_out_rel' : 'level out'
,'_bear_v_out_rel' : 'bear out'
,'_listen_v_in_rel' : 'listen in'
,'_nail_v_down_rel' : 'nail down'
,'_taper_v_off_rel' : 'taper off'
,'_harness_v_up_rel' : 'harness up'
,'_tire_v_out_rel' : 'tire out'
,'_turn_v_in_rel' : 'turn in'
,'_whack_v_off_rel' : 'whack off'
,'_double_v_up_rel' : 'double up'
,'_put_v_through_rel' : 'put through'
,'_cut_v_back_rel' : 'cut back'
,'_boil_v_down_rel' : 'boil down'
,'_move_v_in_rel' : 'move in'
,'_smash_v_up_rel' : 'smash up'
,'_block_v_out_rel' : 'block out'
,'_ring_v_up_rel' : 'ring'
,'_check_v_out-of_rel' : 'check out of'
,'_scarf_v_up_rel' : 'scarf up'
,'_give_v_in_rel' : 'give in'
,'_send_v_in_rel' : 'send in'
,'_want_v_out_rel' : 'want out'
,'_damp_v_out_rel' : 'damp out'
,'_mete_v_out_rel' : 'mete out'
,'_fling_v_back_rel' : 'fling back'
,'_lay_v_on_rel' : 'lay on'
,'_truss_v_up_rel' : 'truss up'
,'_fuck_v_around_rel' : 'fuck around'
,'_water_v_down_rel' : 'water down'
,'_get_v_down-to_rel' : 'get down to'
,'_break_v_out_rel' : 'break out'
,'_bring_v_along_rel' : 'bring along'
,'_scale_v_back_rel' : 'scale back'
,'_stall_v_off_rel' : 'stall off'
,'_come_v_over_rel' : 'come over'
,'_carve_v_up_rel' : 'carve up'
,'_drown_v_out_rel' : 'drown out'
,'_strike_v_off_rel' : 'strike off'
,'_lump_v_in_rel' : 'lump in'
,'_strip_v_down_rel' : 'strip down'
,'_dry_v_up_rel' : 'dry up'
,'_pipe_v_up_rel' : 'pipe up'
,'_round_v_down_rel' : 'round down'
,'_ship_v_out_rel' : 'ship out'
,'_speak_v_up_rel' : 'speak up'
,'_spell_v_out_rel' : 'spell out'
,'_store_v_up_rel' : 'store up'
,'_space_v_out_rel' : 'space out'
,'_tie_v_in_rel' : 'tie in'
,'_wash_v_down_rel' : 'wash down'
,'_camp_v_out_rel' : 'camp out'
,'_hold_v_onto_rel' : 'hold on to'
,'_pull_v_apart_rel' : 'pull apart'
,'_put_v_on_rel' : 'put on'
,'_drag_v_out_rel' : 'drag out'
,'_snap_v_off_rel' : 'snap off'
,'_sprawl_v_out_rel' : 'sprawl out'
,'_stand_v_up_rel' : 'stand up'
,'_pin_v_down_rel' : 'pin down'
,'_bundle_v_up_rel' : 'bundle up'
,'_gray_v_out_rel' : 'gray out'
,'_level_v_off_rel' : 'level off'
,'_wear_v_thin_rel' : 'wear thin'
,'_dress_v_up_rel' : 'dress up'
,'_stroll_v_along_rel' : 'stroll along'
,'_pay_v_over_rel' : 'pay over'
,'_log_v_on_rel' : 'log on'
,'_net_v_out_rel' : 'net out'
,'_freshen_v_up_rel' : 'freshen up'
,'_cry_v_out_rel' : 'cry out'
,'_swell_v_up_rel' : 'swell up'
,'_bone_v_up_rel' : 'bone up'
,'_heat_v_up_rel' : 'heat up'
,'_tie_v_on_rel' : 'tie on'
,'_invite_v_out_rel' : 'invite out'
,'_figure_v_in_rel' : 'figure in'
,'_muscle_v_out_rel' : 'muscle out'
,'_iron_v_out_rel' : 'iron out'
,'_measure_v_up_rel' : 'measure up'
,'_thin_v_out_rel' : 'thin out'
,'_cross_v_out_rel' : 'cross out'
,'_dream_v_up_rel' : 'dream up'
,'_buy_v_off_rel' : 'buy off'
,'_mess_v_around_rel' : 'mess around'
,'_tank_v_up_rel' : 'tank up'
,'_slacken_v_off_rel' : 'slacken off'
,'_wash_v_out_rel' : 'wash out'
,'_seal_v_up_rel' : 'seal up'
,'_crack_v_up_rel' : 'crack up'
,'_power_v_up_rel' : 'power up'
,'_fall_v_in_rel' : 'fall in'
,'_take_v_away_rel' : 'take away'
,'_leave_v_behind_rel' : 'leave behind'
,'_put_v_in_rel' : 'put in'
,'_brush_v_aside_rel' : 'brush aside'
,'_hash_v_out_rel' : 'hash out'
,'_head_v_up_rel' : 'head up'
,'_peter_v_out_rel' : 'peter out'
,'_take_v_aback_rel' : 'take aback'
,'_crank_v_up_rel' : 'crank up'
,'_clear_v_away_rel' : 'clear away'
,'_count_v_out_rel' : 'count out'
,'_plan_v_on_rel' : 'plan on'
,'_carry_v_in_rel' : 'carry in'
,'_parcel_v_out_rel' : 'parcel out'
,'_set_v_aside_rel' : 'set aside'
,'_fuck_v_up_rel' : 'fuck up'
,'_weave_v_in_rel' : 'weave in'
,'_swing_v_in_rel' : 'swing in'
,'_fool_v_around_rel' : 'fool around'
,'_print_v_out_rel' : 'print out'
,'_call_v_in_rel' : 'call in'
,'_balance_v_out_rel' : 'balance out'
,'_put_v_out_rel' : 'put out'
,'_draw_v_back_rel' : 'draw back'
,'_get_v_across_rel' : 'get across'
,'_run_v_back_rel' : 'run back'
,'_blast_v_out_rel' : 'blast out'
,'_grow_v_up_rel' : 'grow up'
,'_hang_v_up_rel' : 'hang up'
,'_pig_v_out_rel' : 'pig out'
,'_draw_v_in_rel' : 'draw in'
,'_gum_v_up_rel' : 'gum up'
,'_pay_v_out_rel' : 'pay out'
,'_draw_v_aside_rel' : 'draw aside'
,'_chop_v_out_rel' : 'chop out'
,'_buy_v_out_rel' : 'buy out'
,'_foul_v_up_rel' : 'foul up'
,'_slip_v_on_rel' : 'slip on'
,'_cotton_v_on_rel' : 'cotton on'
,'_knuckle_v_down_rel' : 'knuckle down'
,'_buck_v_up_rel' : 'buck up'
,'_pour_v_out_rel' : 'pour out'
,'_knock_v_out_rel' : 'knock out'
,'_push_v_aside_rel' : 'push aside'
,'_put_v_up_rel' : 'put up'
,'_bite_v_off_rel' : 'bite off'
,'_blurt_v_out_rel' : 'blurt out'
,'_take_v_out_rel' : 'take out'
,'_work_v_up_rel' : 'work up'
,'_settle_v_in_rel' : 'settle in'
,'_have_v_off_rel' : 'have off'
,'_stock_v_up_rel' : 'stock up'
,'_kit_v_out_rel' : 'kit out'
,'_come_v_up_rel' : 'come up'
,'_string_v_along_rel' : 'string'
,'_grandfather_v_in_rel' : 'grandfather in'
,'_split_v_off_rel' : 'split off'
,'_notch_v_up_rel' : 'notch up'
,'_add_v_on_rel' : 'add on'
,'_hook_v_up_rel' : 'hook up with'
,'_make_v_believe_rel' : 'make believe'
,'_narrow_v_down-to_rel' : 'narrow down'
,'_hand_v_off_rel' : 'hand off'
,'_invite_v_in_rel' : 'invite in'
,'_piss_v_off_rel' : 'piss off'
,'_clamp_v_down_rel' : 'clamp down on'
,'_meet_v_up_rel' : 'meet up'
,'_mail_v_back_rel' : 'mail back'
,'_cheer_v_up_rel' : 'cheer up'
,'_lock_v_in_rel' : 'lock in'
,'_scrub_v_off_rel' : 'scrub off'
,'_feed_v_in_rel' : 'feed in'
,'_fit_v_in_rel' : 'fit in'
,'_pump_v_up_rel' : 'pump up'
,'_sink_v_in_rel' : 'sink in'
,'_tear_v_open_rel' : 'tear open'
,'_reel_v_in_rel' : 'reel in'
,'_track_v_down_rel' : 'track down'
,'_polish_v_off_rel' : 'polish off'
,'_wash_v_up_rel' : 'wash up'
,'_wolf_v_down_rel' : 'wolf down'
,'_slip_v_off_rel' : 'slip off'
,'_phase_v_in_rel' : 'phase in'
,'_finish_v_up_rel' : 'finish'
,'_spread_v_out_rel' : 'spread out'
,'_hunker_v_down_rel' : 'hunker down'
,'_follow_v_through_rel' : 'follow through'
,'_gas_v_up_rel' : 'gas up'
,'_look_v_out_rel' : 'look out'
,'_wrap_v_up_rel' : 'wrap up'
,'_pull_v_in_rel' : 'pull in'
,'_fight_v_off_rel' : 'fight off'
,'_bat_v_away_rel' : 'bat away'
,'_bounce_v_back_rel' : 'bounce back'
,'_fry_v_up_rel' : 'fry up'
,'_pass_v_out_rel' : 'pass out'
,'_touch_v_off_rel' : 'touch off'
,'_hear_v_back-from_rel' : 'hear back from'
,'_chime_v_in_rel' : 'chime in'
,'_blank_v_out_rel' : 'blank out'
,'_make_v_out_rel' : 'make out'
,'_gobble_v_up_rel' : 'gobble up'
,'_squeeze_v_in_rel' : 'squeeze in'
,'_smooth_v_out_rel' : 'smooth out'
,'_close_v_off_rel' : 'close off'
,'_clutter_v_up_rel' : 'clutter up'
,'_fling_v_open_rel' : 'fling open'
,'_pass_v_up_rel' : 'pass up'
,'_reel_v_out_rel' : 'reel out'
,'_boil_v_up_rel' : 'boil up'
,'_steam_v_open_rel' : 'steam open'
,'_suck_v_up_rel' : 'suck up to'
,'_whip_v_out_rel' : 'whip out'
,'_squirrel_v_away_rel' : 'squirrel away'
,'_take_v_over_rel' : 'take over'
,'_pare_v_off_rel' : 'pare off'
,'_pluck_v_up_rel' : 'pluck up'
,'_roar_v_out_rel' : 'roar out'
,'_polish_v_up_rel' : 'polish up'
,'_snatch_v_up_rel' : 'snatch up'
,'_babble_v_on_rel' : 'babble on'
,'_hold_v_back_rel' : 'hold back'
,'_light_v_up_rel' : 'light up'
,'_listen_v_up_rel' : 'listen up'
,'_shout_v_back_rel' : 'shout back'
,'_seek_v_out_rel' : 'seek out'
,'_set_v_apart_rel' : 'set apart'
,'_psyche_v_out_rel' : 'psyche out'
,'_mark_v_out_rel' : 'mark out'
,'_set_v_up_rel' : 'set up'
,'_mark_v_down_rel' : 'mark down'
,'_read_v_over_rel' : 'read over'
,'_strap_v_up_rel' : 'strap up'
,'_log_v_off_rel' : 'log off'
,'_build_v_on_rel' : 'build on'
,'_bump_v_off_rel' : 'bump off'
,'_chill_v_out_rel' : 'chill out'
,'_look_v_around_rel' : 'look around'
,'_look_v_at_rel': 'look at'
,'_pal_v_around_rel' : 'pal around'
,'_send_v_off_rel' : 'send off'
,'_cover_v_up_rel' : 'cover up'
,'_lock_v_down_rel' : 'lock down'
,'_sharpen_v_up_rel' : 'sharpen up'
,'_spring_v_up_rel' : 'spring up'
,'_go_v_ahead_rel' : 'go ahead'
,'_shave_v_off_rel' : 'shave off'
,'_look_v_up_rel' : 'look up'
,'_build_v_in_rel' : 'build in'
,'_peel_v_off_rel' : 'peel off'
,'_break_v_even_rel' : 'break even'
,'_juggle_v_around_rel' : 'juggle around'
,'_talk_v_over_rel' : 'talk over'
,'_lock_v_on_rel' : 'lock on'
,'_swing_v_down_rel' : 'swing down'
,'_slurp_v_up_rel' : 'slurp up'
,'_lift_v_up_rel' : 'lift up'
,'_spin_v_around_rel' : 'spin around'
,'_tidy_v_up_rel' : 'tidy up'
,'_dig_v_up_rel' : 'dig up'
,'_bring_v_back_rel' : 'bring back'
,'_take_v_on_rel' : 'take on'
,'_stop_v_up_rel' : 'stop up'
,'_back_v_out-of_rel' : 'back out of'
,'_pencil_v_in_rel' : 'pencil in'
,'_wall_v_off_rel' : 'wall off'
,'_grow_v_over_rel' : 'grow over'
,'_knuckle_v_under_rel' : 'knuckle under'
,'_put_v_by_rel' : 'put by'
,'_narrow_v_down_rel' : 'narrow down'
,'_whip_v_in_rel' : 'whip in'
,'_cut_v_apart_rel' : 'cut apart'
,'_let_v_go-of_rel' : 'let go'
,'_stick_v_in_rel' : 'stick in'
,'_hem_v_in_rel' : 'hem in'
,'_flake_v_off_rel' : 'flake off'
,'_hold_v_in_rel' : 'hold in'
,'_bust_v_up_rel' : 'bust up'
,'_add_v_up-to_rel' : 'add up to'
,'_cool_v_off-cause_rel' : 'cool off'
,'_prop_v_open_rel' : 'prop open'
,'_poke_v_up_rel' : 'poke up'
,'_freeze_v_up_rel' : 'freeze up'
,'_blast_v_open_rel' : 'blast open'
,'_abstract_v_away_rel' : 'abstract away'
,'_gamble_v_away_rel' : 'gamble away'
,'_pump_v_out_rel' : 'pump out'
,'_cheer_v_on_rel' : 'cheer on'
,'_come_v_across_rel' : 'come across as'
,'_run_v_around_rel' : 'run around'
,'_send_v_around_rel' : 'send round'
,'_fling_v_off_rel' : 'fling off'
,'_hem_v_out_rel' : 'hem out'
,'_break_v_down_rel' : 'break down'
,'_flip_v_out_rel' : 'flip out'
,'_jot_v_down_rel' : 'jot down'
,'_push_v_off_rel' : 'push off'
,'_hike_v_up_rel' : 'hike up'
,'_string_v_together_rel' : 'string'
,'_tack_v_up_rel' : 'tack up'
,'_throw_v_off_rel' : 'throw off'
,'_siphon_v_off_rel' : 'siphon off'
,'_tote_v_up_rel' : 'tote up'
,'_dig_v_out_rel' : 'dig out'
,'_bring_v_together_rel' : 'bring together'
,'_trim_v_down_rel' : 'trim down'
,'_save_v_up_rel' : 'save up'
,'_work_v_out_rel' : 'work out'
,'_mix_v_up_rel' : 'mix up'
,'_wipe_v_out_rel' : 'wipe out'
,'_bend_v_over_rel' : 'bend over'
,'_patch_v_in_rel' : 'patch in'
,'_take_v_off_rel' : 'take off'
,'_flip_v_up_rel' : 'flip up'
,'_strike_v_down_rel' : 'strike down'
,'_blow_v_off_rel' : 'blow off'
,'_run_v_off_rel' : 'run off'
,'_stop_v_by_rel' : 'stop by'
,'_gang_v_up_rel' : 'gang up'
,'_get_v_along_rel' : 'get along'
,'_crash_v_out_rel' : 'crash out'
,'_set_v_off_rel' : 'set off'
,'_call_v_out_rel' : 'call out'
,'_dial_v_in_rel' : 'dial in'
,'_bubble_v_up_rel' : 'bubble up'
,'_chop_v_off_rel' : 'chop off'
,'_smooth_v_away_rel' : 'smooth away'
,'_swing_v_up_rel' : 'swing up'
,'_sand_v_down_rel' : 'sand down'
,'_frighten_v_off_rel' : 'frighten off'
,'_bleep_v_out_rel' : 'bleep out'
,'_pop_v_out_rel' : 'pop out'
,'_flag_v_down_rel' : 'flag down'
,'_throw_v_in_rel' : 'throw in'
,'_trace_v_down_rel' : 'trace down'
,'_scrub_v_out_rel' : 'scrub out'
,'_rot_v_out_rel' : 'rot out'
,'_burn_v_down_rel' : 'burn down'
,'_snow_v_in_rel' : 'snow in'
,'_go_v_off_rel' : 'go off'
,'_buoy_v_up_rel' : 'buoy up'
,'_limit_v_down_rel' : 'limit down'
,'_point_v_out-to_rel' : 'point out'
,'_sit_v_down_rel' : 'sit down'
,'_fly_v_in_rel' : 'fly in'
,'_cast_v_out_rel' : 'cast out'
,'_scare_v_off_rel' : 'scare off'
,'_beat_v_up_rel' : 'beat up'
,'_roll_v_over_rel' : 'roll over'
,'_speak_v_out_rel' : 'speak out'
,'_start_v_up_rel' : 'start up'
,'_take_v_down_rel' : 'take down'
,'_win_v_back_rel' : 'win back'
,'_grind_v_out_rel' : 'grind out'
,'_ice_v_over_rel' : 'ice over'
,'_lose_v_out_rel' : 'lose out'
,'_draw_v_out_rel' : 'draw out'
,'_drift_v_off_rel' : 'drift off'
,'_freak_v_out_rel' : 'freak out'
,'_hear_v_out_rel' : 'hear out'
,'_run_v_out_rel' : 'run out'
,'_round_v_up_rel' : 'round up'
,'_drink_v_up_rel' : 'drink up'
,'_shoo_v_away_rel' : 'shoo away'
,'_fight_v_back_rel' : 'fight back'
,'_louse_v_up_rel' : 'louse up'
,'_want_v_in_rel' : 'want in'
,'_lace_v_up_rel' : 'lace up'
,'_cave_v_in_rel' : 'cave in'
,'_switch_v_off_rel' : 'switch off'
,'_eat_v_up_rel' : 'eat up'
,'_reach_v_out_rel' : 'reach out'
,'_tune_v_out_rel' : 'tune out'
,'_bang_v_up_rel' : 'bang up'
,'_summon_v_forth_rel' : 'summon forth'
,'_look_v_out_rel' : 'look out for'
,'_roll_v_down_rel' : 'roll down'
,'_kick_v_up_rel' : 'kick up'
,'_send_v_out_rel' : 'send out'
,'_dash_v_out_rel' : 'dash out'
,'_flare_v_up_rel' : 'flare up'
,'_close_v_down_rel' : 'close down'
,'_ferret_v_out_rel' : 'ferret out'
,'_come_v_up_rel' : 'come up with'
,'_flunk_v_out_rel' : 'flunk out'
,'_read_v_out_rel' : 'read out'
,'_schedule_v_in_rel' : 'schedule in'
,'_screw_v_in_rel' : 'screw in'
,'_snuggle_v_up_rel' : 'snuggle up'
,'_cast_v_down_rel' : 'cast down'
,'_cut_v_off_rel' : 'cut off'
,'_back_v_up_rel' : 'back up'
,'_etch_v_away_rel' : 'etch away'
,'_throw_v_on_rel' : 'throw on'
,'_fork_v_out_rel' : 'fork out'
,'_lock_v_up_rel' : 'lock up'
,'_swoop_v_up_rel' : 'swoop up'
,'_wring_v_out_rel' : 'wring out'
,'_link_v_up_rel' : 'link up'
,'_pair_v_up_rel' : 'pair up'
,'_hose_v_down_rel' : 'hose down'
,'_contract_v_out_rel' : 'contract out'
,'_silt_v_up_rel' : 'silt up'
,'_pass_v_over_rel' : 'pass over'
,'_press_v_in_rel' : 'press in'
,'_total_v_up_rel' : 'total up'
,'_air_v_out_rel' : 'air out'
,'_force_v_open_rel' : 'force open'
,'_dole_v_out_rel' : 'dole out'
,'_write_v_back_rel' : 'write back'
,'_warm_v_over_rel' : 'warm over'
,'_luck_v_out_rel' : 'luck out'
,'_walk_v_off_rel' : 'walk off'
,'_blow_v_up_rel' : 'blow up'
,'_sew_v_on_rel' : 'sew on'
,'_rust_v_in_rel' : 'rust in'
,'_go_v_out_rel' : 'go out'
,'_have_v_back_rel' : 'have back'
,'_watch_v_out_rel' : 'watch out'
,'_pay_v_up_rel' : 'pay up'
,'_soak_v_up_rel' : 'soak up'
,'_check_v_up-on_rel' : 'check up on'
,'_miss_v_out_rel' : 'miss out'
,'_cut_v_down_rel' : 'cut down'
,'_edge_v_out_rel' : 'edge out'
,'_pop_v_up_rel' : 'pop up'
,'_seize_v_up_rel' : 'seize up'
,'_pile_v_up_rel' : 'pile up'
,'_stretch_v_out_rel' : 'stretch out'
,'_fend_v_off_rel' : 'fend off'
,'_come_v_out_rel' : 'come out'
,'_haul_v_away_rel' : 'haul away'
,'_soak_v_in_rel' : 'soak in'
,'_throttle_v_back_rel' : 'throttle back'
,'_clam_v_up_rel' : 'clam up'
,'_push_v_up_rel' : 'push up'
,'_fake_v_out_rel' : 'fake out'
,'_cut_v_in_rel' : 'cut in'
,'_loan_v_out_rel' : 'loan out'
,'_tip_v_off_rel' : 'tip off'
,'_weed_v_out_rel' : 'weed out'
,'_factor_v_in_rel' : 'factor in'
,'_mount_v_up_rel' : 'mount up'
,'_push_v_open_rel' : 'push open'
,'_bring_v_down_rel' : 'bring down'
,'_roll_v_out_rel' : 'roll out'
,'_crisp_v_up_rel' : 'crisp up'
,'_hand_v_over_rel' : 'hand over'
,'_even_v_out_rel' : 'even out'
,'_lead_v_on_rel' : 'lead on'
,'_quieten_v_down_rel' : 'quieten down'
,'_round_v_off_rel' : 'round off'
,'_back_v_away_rel' : 'back away'
,'_pull_v_through_rel' : 'pull through'
,'_put_v_together-of_rel' : 'put together of'
}
|
letuananh/intsem.fx
|
coolisf/mappings/mwemap.py
|
Python
|
mit
| 44,329
|
[
"BLAST",
"GULP"
] |
5538f4b7ae4d7a825bbf0c322c95cafb61910675a164cf759be7e31c73861e3e
|
import cv2
import numpy
import math
from numpy.random import random_integers
from scipy.signal import convolve2d
def prepare_test_image(image, width, resize_shape, negated=False):
"""
    This function normalizes an already padded image and flattens it into a
    row vector
    :param image: the input image
    :type image: numpy nd array
    :param width: the target width for width normalization (only used by the
                  commented-out width_normalization step below)
    :type width: int
:param resize_shape: a tuple denoting the shape of the padded image
:type resize_shape: tuple
:param negated: a flag to know if the input image is a negated one
:type negated: boolean
:returns : a 1-D array
"""
# negate the image
if not negated:
image = 255-image
# resizing the image
resized_image = resize_img(image, resize_shape, negated=True)
#resized_image = width_normalization(image, width, resize_shape, negated=True)
# gaussian filtering
resized_image = cv2.GaussianBlur(resized_image,(3,3), 0)
# deskew
#deskewed_image = deskew(resized_image, resize_shape)
# normalize the image values to fit in the range [0,1]
norm_image = numpy.asarray(resized_image, dtype=numpy.float32) / 255.
# Flatten the image to a 1-D vector and return
return norm_image.reshape(1, resize_shape[0] * resize_shape[1])
def do_cropping(image, negated=False):
"""
This method will crop the image using the outermost detectable contour
:param image: input image
:type image: numpy array
:param negated: a boolean value indicating whether the image is already
negated one
:type negated: boolean
"""
# if the image has 3 channels, convert it into a single channel one
if image.ndim == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check if the image is already negated. If not negate it
if not negated:
image = 255-image
# do thresholding
ret,thresh = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
# find contours
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
    # find the index of the contour with the most points (a proxy for the largest contour)
try:
max_index = numpy.argmax(numpy.asarray([len(c) for c in contours]))
except ValueError:
return image
# find the cropping co-ordinates
x, y, width, height = cv2.boundingRect(contours[max_index])
# return cropped image
cropped_img = image[y:y+height, x:x+width]
    # the cropped image should be of the same format as the input image
if not negated:
cropped_img = 255-cropped_img
return cropped_img
def deskew(image, image_shape, negated=False):
"""
    This method deskews an image using image moments
    :param image: a numpy nd array input image
    :param image_shape: a tuple denoting the image's shape
    :param negated: a boolean flag telling whether the input image is a negated one
    :returns: a numpy nd array deskewed image
"""
# negate the image
if not negated:
image = 255-image
# calculate the moments of the image
m = cv2.moments(image)
if abs(m['mu02']) < 1e-2:
return image.copy()
    # calculate the skew
skew = m['mu11']/m['mu02']
M = numpy.float32([[1, skew, -0.5*image_shape[0]*skew], [0,1,0]])
img = cv2.warpAffine(image, M, image_shape, flags=cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR)
return img
def resize_img(image, target_shape, value=255, min_padding=2, negated=False):
"""
    This method adds padding to the image and turns it into a square (n x n)
    array, without losing the aspect ratio
:param image: the input image
:type image: numpy array
:param target_shape: the dimensions to which the image needs to be resized
:type target_shape: tuple
    :param min_padding: minimum padding to be added
:type min_padding: int
:param value: the value of the padding area, 0-black, 255-white
:type value: int
:param negated: a flag indicating the input image is a negated one or not
:type negated: bool
:returns : a padded image
"""
# if the image is a multi channel one, convert it into a single channel one
if image.ndim == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # If the input image is already negated, then the padding should be black
if negated:
value = 0
# image dimensions
image_height, image_width = image.shape
# target dimensions
target_height, target_width = target_shape
# Add padding
    # The aim is to turn an image of differing width and height into a square image.
    # For that, the larger of the two dimensions is determined first.
max_index = numpy.argmax([image_height, image_width])
# if height is the biggest one, then add padding to width until width becomes
# equal to height
if max_index == 0:
padded_img = cv2.copyMakeBorder(image, min_padding, min_padding,
(image_height + min_padding - image_width)/2,
(image_height + min_padding - image_width)/2,
cv2.BORDER_CONSTANT, value=value)
# else if width is the biggest one, then add padding to height until height becomes
# equal to width
else:
padded_img = cv2.copyMakeBorder(image,
(image_width + min_padding - image_height)/2,
(image_width + min_padding - image_height)/2,
min_padding, min_padding, cv2.BORDER_CONSTANT,
value=value)
    # finally resize the square image to the target shape
return cv2.resize(padded_img, target_shape)
def create_2d_gaussian(dim, sigma):
"""
This function creates a 2d gaussian kernel with the standard deviation
denoted by sigma
:param dim: integer denoting a side (1-d) of gaussian kernel
:type dim: int
:param sigma: the standard deviation of the gaussian kernel
:type sigma: float
:returns: a numpy 2d array
"""
# check if the dimension is odd
if dim % 2 == 0:
raise ValueError("Kernel dimension should be odd")
# initialize the kernel
kernel = numpy.zeros((dim, dim), dtype=numpy.float16)
# calculate the center point
center = dim/2
# calculate the variance
variance = sigma ** 2
    # calculate the normalization coefficient
coeff = 1. / (2 * variance)
# create the kernel
for x in range(0, dim):
for y in range(0, dim):
x_val = abs(x - center)
y_val = abs(y - center)
numerator = x_val**2 + y_val**2
denom = 2*variance
kernel[x,y] = coeff * numpy.exp(-1. * numerator/denom)
# normalise it
return kernel/sum(sum(kernel))
def elastic_transform(image, kernel_dim=13, sigma=6, alpha=36, negated=False):
"""
This method performs elastic transformations on an image by convolving
with a gaussian kernel.
    NOTE: the input image should be square
:param image: the input image
:type image: a numpy nd array
:param kernel_dim: dimension(1-D) of the gaussian kernel
:type kernel_dim: int
:param sigma: standard deviation of the kernel
:type sigma: float
:param alpha: a multiplicative factor for image after convolution
:type alpha: float
:param negated: a flag indicating whether the image is negated or not
:type negated: boolean
:returns: a nd array transformed image
"""
# convert the image to single channel if it is multi channel one
if image.ndim == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check if the image is a negated one
if not negated:
image = 255-image
# check if the image is a square one
if image.shape[0] != image.shape[1]:
raise ValueError("Image should be of sqaure form")
    # check if the kernel dimension is odd
if kernel_dim % 2 == 0:
raise ValueError("Kernel dimension should be odd")
# create an empty image
result = numpy.zeros(image.shape)
# create random displacement fields
displacement_field_x = numpy.array([[random_integers(-1, 1) for x in xrange(image.shape[0])] \
for y in xrange(image.shape[1])]) * alpha
displacement_field_y = numpy.array([[random_integers(-1, 1) for x in xrange(image.shape[0])] \
for y in xrange(image.shape[1])]) * alpha
# create the gaussian kernel
kernel = create_2d_gaussian(kernel_dim, sigma)
# convolve the fields with the gaussian kernel
displacement_field_x = convolve2d(displacement_field_x, kernel)
displacement_field_y = convolve2d(displacement_field_y, kernel)
    # make the distorted image by averaging each pixel value over the
    # four neighbouring pixels given by the displacement fields
for row in xrange(image.shape[1]):
for col in xrange(image.shape[0]):
low_ii = row + int(math.floor(displacement_field_x[row, col]))
high_ii = row + int(math.ceil(displacement_field_x[row, col]))
low_jj = col + int(math.floor(displacement_field_y[row, col]))
high_jj = col + int(math.ceil(displacement_field_y[row, col]))
if low_ii < 0 or low_jj < 0 or high_ii >= image.shape[1] -1 \
or high_jj >= image.shape[0] - 1:
continue
res = image[low_ii, low_jj]/4 + image[low_ii, high_jj]/4 + \
image[high_ii, low_jj]/4 + image[high_ii, high_jj]/4
result[row, col] = res
# if the input image was not negated, make the output image also a non
# negated one
if not negated:
result = 255-result
return result
def width_normalization(image, width, target_shape, negated=False):
"""
This method creates a width normalised 1-d vector of an image
:param image: the input image
:type image: numpy nd array
:param width: the width to which the image should be normalized
(a value of -1 will just crop the image along its contour)
:type width: int
:param target_shape: a tuple denoting the output dims
:type target_shape: tuple
:returns: a nd array width normalized image
"""
    # if the image has 3 channels, then convert it into grayscale
if image.ndim == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# negate the image
if not negated:
image = 255-image
# crop the number bounding box
cropped_img = do_cropping(image, negated=True)
if not (cropped_img.shape[0] * cropped_img.shape[1]):
cropped_img = image
# width normalization
if width == -1:
width_normalized_img = cropped_img
    else:
        # cv2.resize expects (width, height); keep the cropped image's height
        width_normalized_img = cv2.resize(cropped_img, (width, cropped_img.shape[0]))
# add padding and resize to the specified shape
resized_image = resize_img(width_normalized_img, target_shape, negated=True)
# return the width normalized image
if not negated:
resized_image = 255-resized_image
return resized_image
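# --- Minimal usage sketch (an illustrative addition, not part of the original
# module). The image size and transform parameters below are assumptions
# chosen for demonstration; the __main__ guard keeps the demo from running on
# import.
if __name__ == '__main__':
    # build a small synthetic square image
    demo_img = numpy.asarray(random_integers(0, 255, (28, 28)), dtype=numpy.uint8)
    # a normalised 13x13 gaussian kernel; its values should sum to ~1.0
    kernel = create_2d_gaussian(13, 6)
    print("gaussian kernel sum: %f" % kernel.sum())
    # apply a random elastic distortion to the synthetic image
    distorted = elastic_transform(demo_img, kernel_dim=13, sigma=6, alpha=36)
    print("distorted image shape: %s" % str(distorted.shape))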
|
vsvinayak/mnist-helper
|
mnist_helpers.py
|
Python
|
mit
| 11,328
|
[
"Gaussian"
] |
25224b620db1473c8ccd12baaf0c66aa9b75ac96160bc725cac8e749c8766f7d
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Guide for the reaction ensemble and the constant pH ensemble. The modeled
reaction is :math:`\\mathrm{AH} \\leftrightarrow \\mathrm{A}^- + \\mathrm{H}^+`.
"""
epilog = __doc__.split(":math:")[0] + "AH <-> A- + H+. " + \
"""You can choose in which ensemble you want to simulate via either
providing --reaction_ensemble or --constant_pH_ensemble as command line
argument to the script. Be aware that in the case of the reaction ensemble,
the dissociation constant gamma is not the thermodynamic reaction constant K,
but rather K * 1 mol/l and therefore carries a unit! In the case of the
constant pH method, gamma is the thermodynamic reaction constant!
"""
import numpy as np
import argparse
import espressomd
import espressomd.reaction_ensemble
parser = argparse.ArgumentParser(epilog=epilog)
group = parser.add_mutually_exclusive_group()
group.add_argument('--reaction_ensemble', action='store_const', dest='mode',
const='reaction_ensemble')
group.add_argument('--constant_pH_ensemble', action='store_const', dest='mode',
const='constant_pH_ensemble')
args = parser.parse_args()
# System parameters
#############################################################
box_l = 35
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
np.random.seed(seed=42)
system.time_step = 0.02
system.cell_system.skin = 0.4
# Particle setup
#############################################################
types = {
"HA": 0,
"A-": 1,
"H+": 2,
}
charge_dict = {
types["HA"]: 0,
types["A-"]: -1,
types["H+"]: +1,
}
N0 = 50 # number of titratable units
K_diss = 0.0088
for i in range(N0):
system.part.add(pos=np.random.random(3) * system.box_l, type=1)
for i in range(N0, 2 * N0):
system.part.add(pos=np.random.random(3) * system.box_l, type=2)
RE = None
if args.mode == "reaction_ensemble":
RE = espressomd.reaction_ensemble.ReactionEnsemble(
kT=1,
exclusion_radius=1,
seed=77)
RE.add_reaction(gamma=K_diss,
reactant_types=[types["HA"]],
reactant_coefficients=[1],
product_types=[types["A-"], types["H+"]],
product_coefficients=[1, 1],
default_charges=charge_dict)
elif args.mode == "constant_pH_ensemble":
RE = espressomd.reaction_ensemble.ConstantpHEnsemble(
kT=1, exclusion_radius=1, seed=77, constant_pH=2)
RE.add_reaction(gamma=K_diss, reactant_types=[types["HA"]],
product_types=[types["A-"], types["H+"]],
default_charges=charge_dict)
else:
    raise RuntimeError(
        "Please provide either --reaction_ensemble or --constant_pH_ensemble as an argument")
print(RE.get_status())
system.setup_type_map(list(types.values()))
# Set the hidden (non-interacting) particle type to the lowest unused type
# number to speed up the simulation
RE.set_non_interacting_type(type=max(types.values()) + 1)
for i in range(10000):
RE.reaction(reaction_steps=1)
if i % 100 == 0:
print("HA", system.number_of_particles(type=types["HA"]),
"A-", system.number_of_particles(type=types["A-"]),
"H+", system.number_of_particles(type=types["H+"]))
print(
"reaction 0 has acceptance rate: ",
RE.get_acceptance_rate_reaction(
reaction_id=0))
print(
"reaction 1 has acceptance rate: ",
RE.get_acceptance_rate_reaction(
reaction_id=1))
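# A minimal follow-up sketch (an illustrative addition, not part of the
# original sample): estimate the degree of dissociation alpha = N(A-) / N0
# from the final particle numbers, reusing only calls already made above.
alpha = system.number_of_particles(type=types["A-"]) / float(N0)
print("degree of dissociation alpha =", alpha)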
|
pkreissl/espresso
|
samples/reaction_ensemble.py
|
Python
|
gpl-3.0
| 4,222
|
[
"ESPResSo"
] |
1d6fecfa27fb1d4e7b58d8f932b9f5dc79cc9331c7c2c4752f8c2a9dd0e37388
|
from __future__ import division, print_function, absolute_import
from scipy import stats
import numpy as np
from numpy.testing import assert_almost_equal, assert_, assert_raises, \
assert_array_almost_equal, assert_array_almost_equal_nulp
def test_kde_1d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
prob1 = gkde.integrate_box_1d(xnmean, np.inf)
prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
def test_kde_2d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
mean = np.array([1.0, 3.0])
covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
# Need transpose (shape (2, 500)) for kde
xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
x, y = np.mgrid[-7:7:500j, -7:7:500j]
grid_coords = np.vstack([x.ravel(), y.ravel()])
kdepdf = gkde.evaluate(grid_coords)
kdepdf = kdepdf.reshape(500, 500)
normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
intervall = y.ravel()[1] - y.ravel()[0]
assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
small = -1e100
large = 1e100
prob1 = gkde.integrate_box([small, mean[1]], [large, large])
prob2 = gkde.integrate_box([small, small], [large, mean[1]])
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*(intervall**2), decimal=2)
assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
(kdepdf*normpdf).sum()*(intervall**2), decimal=2)
def test_kde_bandwidth_method():
def scotts_factor(kde_obj):
"""Same as default, just check that it works."""
return np.power(kde_obj.n, -1./(kde_obj.d+4))
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
# Supply a callable
gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
# Supply a scalar
gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
xs = np.linspace(-7,7,51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf2)
kdepdf3 = gkde3.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf3)
assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
# Subclasses that should stay working (extracted from various sources).
# Unfortunately the earlier design of gaussian_kde made it necessary for users
# to create these kinds of subclasses, or call _compute_covariance() directly.
class _kde_subclass1(stats.gaussian_kde):
def __init__(self, dataset):
self.dataset = np.atleast_2d(dataset)
self.d, self.n = self.dataset.shape
self.covariance_factor = self.scotts_factor
self._compute_covariance()
class _kde_subclass2(stats.gaussian_kde):
def __init__(self, dataset):
self.covariance_factor = self.scotts_factor
super(_kde_subclass2, self).__init__(dataset)
class _kde_subclass3(stats.gaussian_kde):
def __init__(self, dataset, covariance):
self.covariance = covariance
stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \
* self.n
class _kde_subclass4(stats.gaussian_kde):
def covariance_factor(self):
return 0.5 * self.silverman_factor()
def test_gaussian_kde_subclassing():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# gaussian_kde itself
kde = stats.gaussian_kde(x1)
ys = kde(xs)
# subclass 1
kde1 = _kde_subclass1(x1)
y1 = kde1(xs)
assert_array_almost_equal_nulp(ys, y1, nulp=10)
# subclass 2
kde2 = _kde_subclass2(x1)
y2 = kde2(xs)
assert_array_almost_equal_nulp(ys, y2, nulp=10)
# subclass 3
kde3 = _kde_subclass3(x1, kde.covariance)
y3 = kde3(xs)
assert_array_almost_equal_nulp(ys, y3, nulp=10)
# subclass 4
kde4 = _kde_subclass4(x1)
y4 = kde4(x1)
y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
assert_array_almost_equal(y_expected, y4, decimal=6)
# Not a subclass, but check for use of _compute_covariance()
kde5 = kde
kde5.covariance_factor = lambda: kde.factor
kde5._compute_covariance()
y5 = kde5(xs)
assert_array_almost_equal_nulp(ys, y5, nulp=10)
def test_gaussian_kde_covariance_caching():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]
# Set the bandwidth, then reset it to the default.
kde = stats.gaussian_kde(x1)
kde.set_bandwidth(bw_method=0.5)
kde.set_bandwidth(bw_method='scott')
y2 = kde(xs)
assert_array_almost_equal(y_expected, y2, decimal=7)
def test_gaussian_kde_monkeypatch():
"""Ugly, but people may rely on this. See scipy pull request 123,
specifically the linked ML thread "Width of the Gaussian in stats.kde".
If it is necessary to break this later on, that is to be discussed on ML.
"""
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# The old monkeypatched version to get at Silverman's Rule.
kde = stats.gaussian_kde(x1)
kde.covariance_factor = kde.silverman_factor
kde._compute_covariance()
y1 = kde(xs)
# The new saner version.
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
y2 = kde2(xs)
assert_array_almost_equal_nulp(y1, y2, nulp=10)
def test_kde_integer_input():
"""Regression test for #1181."""
x1 = np.arange(5)
kde = stats.gaussian_kde(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_pdf_logpdf():
np.random.seed(1)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
xs = np.linspace(-15, 12, 25)
pdf = gkde.evaluate(xs)
pdf2 = gkde.pdf(xs)
assert_almost_equal(pdf, pdf2, decimal=12)
logpdf = np.log(pdf)
logpdf2 = gkde.logpdf(xs)
assert_almost_equal(logpdf, logpdf2, decimal=12)
# There are more points than data
gkde = stats.gaussian_kde(xs)
pdf = np.log(gkde.evaluate(xn))
pdf2 = gkde.logpdf(xn)
assert_almost_equal(pdf, pdf2, decimal=12)
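# A minimal standalone sketch (an illustrative addition, not part of the test
# suite): fit a KDE with Silverman's rule and query it, mirroring what the
# tests above verify.
if __name__ == "__main__":
    np.random.seed(0)
    sample = np.random.randn(200)
    kde = stats.gaussian_kde(sample, bw_method='silverman')
    grid = np.linspace(-4, 4, 9)
    print("density on grid:", kde.evaluate(grid))
    print("P(X > 0) ~=", kde.integrate_box_1d(0, np.inf))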
|
apbard/scipy
|
scipy/stats/tests/test_kdeoth.py
|
Python
|
bsd-3-clause
| 7,895
|
[
"Gaussian"
] |
b40dda1b931c6d76b8b55cdda645ddde1580accab54668813434a20be1bb7b56
|
#!/usr/bin/env python
# Some code derived from hnetcdf_builder.py by Matt Paget & Edward King of CSIRO
# https://stash.csiro.au/projects/CMAR_RS/repos/netcdf-tools/browse/create/netcdf_builder.py
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
'''
Created on Jun 9, 2015
@author: Alex Ip (based on code by Matt Paget & Edward King)
'''
import netCDF4
import numpy as np
import os
import re
from collections import OrderedDict
import logging
from osgeo import gdal, gdalconst, osr
from datetime import datetime
from _gdfutils import log_multiline
# Only needed for testing
from pprint import pprint
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # Logging level for this module
try:
import netcdf_builder
except ImportError:
logger.error('Requires netcdf_builder.py (https://stash.csiro.au/projects/CMAR_RS/repos/netcdf-tools/browse/create/netcdf_builder.py)')
raise
class GDFNetCDF(object):
'''
Class GDFNetCDF - Class to manage GDF netCDF storage units
'''
def __init__(self, storage_config, netcdf_filename=None, netcdf_mode=None, netcdf_format=None, decimal_places=None):
'''
Constructor for class GDFNetCDF
Parameters:
storage_config: nested dict containing configuration for storage type (defined in class GDF)
netcdf_filename: Filename of netCDF file to be opened
netcdf_mode: Mode for netCDF file open
netcdf_format: Format for netCDF file open
'''
self._isopen = False
self.storage_config = storage_config
self.netcdf_filename = netcdf_filename
self.netcdf_mode = netcdf_mode or 'r' # Default to 'r' for reading
self.netcdf_format = netcdf_format or 'NETCDF4_CLASSIC'
self.decimal_places = decimal_places if decimal_places is not None else 6 # Default to 6 decimal places if no precision specified
if netcdf_filename is None:
self.netcdf_object = None
else:
self.open(netcdf_filename)
def __del__(self):
'''
Destructor for class GDFNetCDF
'''
self.close()
def close(self):
'''
        Close the netCDF file if it is open
'''
self._isopen = False
try:
self.netcdf_object.close()
except:
pass
def open(self, netcdf_filename=None, netcdf_mode=None, netcdf_format=None):
'''
        Open the netCDF file for class GDFNetCDF
        Parameters:
            netcdf_filename: Filename of netCDF file to be opened
            netcdf_mode: Mode for netCDF file open
            netcdf_format: Format for netCDF file open
'''
self._isopen = False
# Default to existing instance values
self.netcdf_filename = netcdf_filename or self.netcdf_filename
assert self.netcdf_filename, 'NetCDF filename not provided'
self.netcdf_mode = netcdf_mode or self.netcdf_mode
self.netcdf_format = netcdf_format or self.netcdf_format
        if self.netcdf_mode == 'w':
self.netcdf_object = netCDF4.Dataset(self.netcdf_filename, mode=self.netcdf_mode, format=self.netcdf_format)
else:
# Format will be deduced by the netCDF modules
self.netcdf_object = netCDF4.Dataset(self.netcdf_filename, mode=self.netcdf_mode)
self.netcdf_format = self.netcdf_object.file_format
self._isopen = True
def create(self, netcdf_filename, index_tuple, dimension_index_dict={}, netcdf_format=None):
'''
Create new NetCDF File in 'w' mode with required dimensions
        Parameters:
            netcdf_filename: Filename of the netCDF file to be created
            index_tuple: tuple of storage unit indices
            dimension_index_dict: dict of iterables or 1D numpy arrays keyed by dimension_tag. Required for irregular dimensions (e.g. time)
            netcdf_format: Format for netCDF file creation
'''
def set_dimension(dimension, dimension_config, index, dimension_index_vector=None):
'''
Parameters:
dimension: Dimension tag (e.g. X, Y, T, etc.)
dimension_config: Nested dict containing storage configuration from GDF.storage_config['<storage_type>']
index: index for storage unit
dimension_index_vector: Numpy array of index values for irregular dimension (e.g. time) or None for unlimited irregular dimension
'''
logger.debug('dimension = %s', dimension)
logger.debug('dimension_config = %s', dimension_config)
logger.debug('index = %s', index)
logger.debug('dimension_index_vector = %s', dimension_index_vector)
            # explicit None check; truth-testing a numpy array raises ValueError
            if dimension_config['indexing_type'] == 'regular' and dimension_index_vector is None:
element_size = dimension_config['dimension_element_size']
dimension_min = index * dimension_config['dimension_extent'] + dimension_config['dimension_origin'] + element_size / 2.0 # Half pixel to account for netCDF centre of pixel reference
dimension_max = dimension_min + dimension_config['dimension_extent']
dimension_index_vector = np.around(np.arange(dimension_min, dimension_max, element_size), self.decimal_places)
# Cater for reversed index (e.g. positive Y index tends Southwards when image origin is in UL/NW corner)
if dimension_config['reverse_index']:
dimension_index_vector = dimension_index_vector[::-1]
#TODO: Implement fixed indexing type
log_multiline(logger.debug, dimension_index_vector, 'dimension_index_vector for %s' % dimension, '\t')
if dimension_index_vector is not None:
dimension_index_shape = dimension_index_vector.shape
assert len(dimension_index_shape) == 1, 'Invalid dimension_index_vector shape. Must be 1D'
assert dimension_index_shape[0] <= dimension_config['dimension_elements'], 'dimension_index_vector must have %d elements or fewer' % dimension_config['dimension_elements']
dimension_size = len(dimension_index_vector)
#TODO: Do range checks to ensure indices are within storage unit boundaries
else:
dimension_size = 0 # Unlimited dimension
dimension_name = dimension_config['dimension_name']
# Dimensions can be renamed with the 'renameDimension' method of the file
self.netcdf_object.createDimension(dimension_name, dimension_size)
variable = self.netcdf_object.createVariable(dimension_name,'f8',(dimension_name,))
for property_name, property_value in dimension_config['properties'].items():
logger.debug('property_name = %s, property_value = %s', property_name, property_value)
variable.__setattr__(property_name, property_value)
variable[:] = dimension_index_vector
def set_variable(variable_name, variable_config):
dimensions = self.storage_config['dimensions'].keys()
dimension_names = tuple([self.storage_config['dimensions'][dimension]['dimension_name']
for dimension in dimensions])
nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
chunksizes = tuple([min(self.storage_config['dimensions'][dimension]['dimension_cache'], nc_shape_dict[dimension])
for dimension in dimensions])
logger.debug('Creating variable %s with dimensions %s and chunk sizes %s', variable_name, dimensions, chunksizes)
variable = self.netcdf_object.createVariable(variable_name, variable_config['netcdf_datatype_name'], dimensions=dimension_names,
chunksizes=chunksizes, fill_value=variable_config['nodata_value'], zlib=True)
logger.debug('variable = %s' % variable)
# Set variable metadata
metadata_dict = {variable_name + ':' + 'coordinates': ' '.join(dimension_names),
variable_name + ':' + 'grid_mapping': 'crs',
variable_name + ':' + 'standard_name': variable_name,
variable_name + ':' + 'long_name': variable_config['measurement_type_name']
}
self.set_attributes(metadata_dict)
self.netcdf_object.sync()
# Start of create function
# Default to existing instance value
self.netcdf_mode = 'w'
self.netcdf_format = netcdf_format or self.netcdf_format
self.open(netcdf_filename=netcdf_filename)
for dimension, dimension_config in self.storage_config['dimensions'].items():
set_dimension(dimension, dimension_config, index_tuple[self.storage_config['dimensions'].keys().index(dimension)], dimension_index_dict.get(dimension))
for variable, variable_config in self.storage_config['measurement_types'].items():
set_variable(variable, variable_config)
logger.debug('self.netcdf_object.variables = %s' % self.netcdf_object.variables)
creation_date = datetime.utcnow().strftime("%Y%m%d")
self.netcdf_object.history = 'NetCDF-CF file created %s.' %(creation_date)
self.netcdf_object.license = 'Generalised Data Framework NetCDF-CF Test File'
self.netcdf_object.spatial_coverage = '%f %s grid' % (self.storage_config['dimensions']['X']['dimension_extent'],
self.storage_config['dimensions']['X']['reference_system_unit'])
self.netcdf_object.featureType = 'grid'
self.sync()
def write_slice(self, variable_name, slice_array, indices_dict):
'''
Function to set a specified slice in the specified netCDF variable
Parameters:
variable_name: Name of variable to which slice array will be written
slice_array: Numpy array to be written to netCDF file
indices_dict: Dict keyed by dimension tag indicating the dimension(s) & index/indices to which the slice should be written
'''
if not self._isopen:
self.open()
dimension_config = self.storage_config['dimensions']
dimensions = dimension_config.keys()
index_dimensions = indices_dict.keys()
dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
# Dict of dimensions and sizes read from netCDF
nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
logger.debug('variable_name = %s', variable_name)
logger.debug('slice_array.shape = %s', slice_array.shape)
logger.debug('indices_dict = %s', indices_dict)
logger.debug('nc_shape_dict = %s', nc_shape_dict)
assert set(index_dimensions) <= set(dimensions), 'Invalid slice index dimension(s)'
assert len(slice_array.shape) + len(indices_dict) == len(dimensions), 'Indices must be provided for all dimensions not covered by the data array'
slice_shape = tuple(nc_shape_dict[dimension] for dimension in dimensions if dimension not in indices_dict)
assert slice_array.shape == slice_shape, 'Shape of data array %s does not match storage unit slice shape %s' % (slice_array.shape, slice_shape)
# Create slices for accessing netcdf array
slicing = [slice(indices_dict[dimension], indices_dict[dimension] + 1) if dimension in index_dimensions
else slice(0, nc_shape_dict[dimension]) for dimension in dimensions]
logger.debug('slicing = %s', slicing)
logger.debug('self.netcdf_object.variables = %s' % self.netcdf_object.variables)
variable = self.netcdf_object.variables[variable_name]
# logger.debug('variable = %s' % variable)
logger.debug('slice_array = %s', slice_array)
variable[slicing] = slice_array
def read_slice(self, variable_name, indices_dict):
'''
Function to read a specified slice in the specified netCDF variable
Parameters:
variable_name: Name of variable from which the slice array will be read
indices_dict: Dict keyed by dimension tag indicating the dimension(s) & index/indices from which the slice should be read
Returns:
slice_array: Numpy array read from netCDF file
'''
if not self._isopen:
self.open()
dimension_config = self.storage_config['dimensions']
dimensions = dimension_config.keys()
index_dimensions = indices_dict.keys()
dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
# Dict of dimensions and sizes read from netCDF
nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
logger.debug('variable_name = %s', variable_name)
logger.debug('indices_dict = %s', indices_dict)
logger.debug('nc_shape_dict = %s', nc_shape_dict)
assert set(index_dimensions) <= set(dimensions), 'Invalid slice index dimension(s)'
# Create slices for accessing netcdf array
slicing = [slice(indices_dict[dimension], indices_dict[dimension] + 1) if dimension in index_dimensions
else slice(0, nc_shape_dict[dimension]) for dimension in dimensions]
logger.debug('slicing = %s', slicing)
logger.debug('self.netcdf_object.variables = %s' % self.netcdf_object.variables)
variable = self.netcdf_object.variables[variable_name]
# logger.debug('variable = %s' % variable)
slice_array = variable[slicing]
logger.debug('slice_array = %s', slice_array)
return slice_array
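    # A minimal usage sketch for the slice accessors above (illustrative
    # comments only; the filename 'my_unit.nc', the variable name 'B40' and
    # the index values are hypothetical):
    #
    #     gdfnc = GDFNetCDF(storage_config, netcdf_filename='my_unit.nc', netcdf_mode='a')
    #     band = gdfnc.read_slice('B40', {'T': 0})    # read one time slice
    #     gdfnc.write_slice('B40', band, {'T': 0})    # writing requires mode 'a' or 'w'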
def get_subset_indices(self, range_dict):
'''
Function to read an array subset of the specified netCDF variable
Parameters:
variable_name: Name of variable from which the subset array will be read
range_dict: Dict keyed by dimension tag containing the dimension(s) & range tuples from which the subset should be read
Returns:
dimension_indices_dict: Dict containing array indices for each dimension
'''
if not self._isopen:
self.open()
dimension_config = self.storage_config['dimensions']
dimensions = dimension_config.keys()
range_dimensions = range_dict.keys()
dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
# Dict of dimensions and sizes read from netCDF
nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
logger.debug('range_dict = %s', range_dict)
logger.debug('nc_shape_dict = %s', nc_shape_dict)
assert set(range_dimensions) <= set(dimensions), 'Invalid range dimension(s)'
# Create slices for accessing netcdf array
dimension_indices_dict = {} # Dict containing all indices for each dimension
for dimension_index in range(len(dimensions)):
dimension = dimensions[dimension_index]
dimension_array = self.netcdf_object.variables[dimension_names[dimension_index]][:]
if dimension in range_dimensions:
logger.debug('dimension_array = %s', dimension_array)
logger.debug('range = %s', range_dict[dimension])
mask_array = ((dimension_array > range_dict[dimension][0]) * (dimension_array <= range_dict[dimension][1]))
index_array = np.where(mask_array)
logger.debug('index_array = %s', index_array)
dimension_indices_dict[dimension] = dimension_array[mask_array]
                if index_array[0].size == 0:  # np.where returns a non-empty tuple, so test the matched indices instead
                    logger.warning('Invalid range %s for dimension %s', range_dict[dimension], dimension)
                    return None
else: # Range not defined for this dimension - take the whole lot
dimension_indices_dict[dimension] = dimension_array
return dimension_indices_dict
def read_subset(self, variable_name, range_dict, max_bytes=None):
'''
Function to read an array subset of the specified netCDF variable
Parameters:
variable_name: Name of variable from which the subset array will be read
range_dict: Dict keyed by dimension tag containing the dimension(s) & range tuples from which the subset should be read
Returns:
subset_array: Numpy array read from netCDF file
dimension_indices_dict: Dict containing array indices for each dimension
max_bytes: integer specifying maximum number of bytes per read. None = unlimited
'''
if not self._isopen:
self.open()
dimension_config = self.storage_config['dimensions']
dimensions = dimension_config.keys()
range_dimensions = range_dict.keys()
dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
# Dict of dimensions and sizes read from netCDF
nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
logger.debug('variable_name = %s', variable_name)
logger.debug('range_dict = %s', range_dict)
logger.debug('nc_shape_dict = %s', nc_shape_dict)
assert set(range_dimensions) <= set(dimensions), 'Invalid range dimension(s)'
# Create slices for accessing netcdf array
dimension_indices_dict = {} # Dict containing all indices for each dimension
slicing = []
for dimension_index in range(len(dimensions)):
dimension = dimensions[dimension_index]
dimension_array = self.netcdf_object.variables[dimension_names[dimension_index]][:]
if dimension in range_dimensions:
logger.debug('dimension_array = %s', dimension_array)
logger.debug('range = %s', range_dict[dimension])
mask_array = ((dimension_array > range_dict[dimension][0]) * (dimension_array <= range_dict[dimension][1]))
index_array = np.where(mask_array)
logger.debug('index_array = %s', index_array)
dimension_indices_dict[dimension] = dimension_array[mask_array]
try:
dimension_slice = slice(index_array[0][0], index_array[0][-1] + 1)
except IndexError:
logger.warning('Invalid range %s for dimension %s', range_dict[dimension], dimension)
return None
else: # Range not defined for this dimension
dimension_indices_dict[dimension] = dimension_array
dimension_slice = slice(0, nc_shape_dict[dimension])
slicing.append(dimension_slice)
logger.debug('slicing = %s', slicing)
variable = self.netcdf_object.variables[variable_name]
# logger.debug('variable = %s' % variable)
if max_bytes == None: # Unlimited read size
subset_array = variable[slicing]
else: # Break read operation into separate reads each under maximum size
#TODO: Allow for case where slice size is greater than max_bytes - i.e. partitioning in more than one dimension
subset_shape = tuple([s.stop - s.start for s in slicing])
logger.debug('subset_shape = %s', subset_shape)
slice_bytes = variable[[slice(0,1) for dimension in dimension_names]].itemsize * reduce(lambda x, y: x*y, [s.stop - s.start for s in slicing[1:]])
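# Round the number of whole slices per read down to a multiple of the
# configured dimension_cache, so each partial read stays under max_bytes
# while remaining aligned to the caching/chunking unit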
max_slices = (max_bytes //
slice_bytes //
self.storage_config['dimensions'][dimensions[0]]['dimension_cache'] *
self.storage_config['dimensions'][dimensions[0]]['dimension_cache'])
logger.debug('max_slices = %s', max_slices)
subset_array = np.zeros(shape=subset_shape, dtype=variable.dtype)
for source_start_index in range(slicing[0].start, slicing[0].stop, max_slices):
source_stop_index = min([source_start_index + max_slices, slicing[0].stop])
source_slicing = [slice(source_start_index, source_stop_index)] + slicing[1:]
destination_slicing = [slice(source_slicing[slice_index].start - slicing[slice_index].start, source_slicing[slice_index].stop - slicing[slice_index].start)
for slice_index in range(len(source_slicing))]
logger.debug('source_slicing = %s', source_slicing)
logger.debug('destination_slicing = %s', destination_slicing)
subset_array[destination_slicing] = variable[source_slicing]
logger.debug('subset_array = %s', subset_array)
return subset_array, dimension_indices_dict
def get_datatype(self, variable_name, convention='numpy'):
'''
Returns NetCDF datatype of specified variable
'''
return self.storage_config['measurement_types'][variable_name].get(convention + '_datatype_name')
def get_attributes(self, verbose=None, normalise=True):
"""
Copy the global and variable attributes from a netCDF object to an
OrderedDict. This is a little like 'ncdump -h' (without the formatting).
Global attributes are keyed in the OrderedDict by the attribute name.
Variable attributes are keyed in the OrderedDict by the variable name and
attribute name separated by a colon, i.e. variable:attribute.
Normalise means that some NumPy types returned from the netCDF module are
converted to equivalent regular types.
Notes from the netCDF module:
The ncattrs method of a Dataset or Variable instance can be used to
retrieve the names of all the netCDF attributes.
The __dict__ attribute of a Dataset or Variable instance provides all
the netCDF attribute name/value pairs in an OrderedDict.
self.netcdf_object.dimensions.iteritems()
self.netcdf_object.variables
self.netcdf_object.ncattrs()
self.netcdf_object.__dict__
"""
return netcdf_builder.get_attributes(self.netcdf_object, verbose, normalise)
def set_attributes(self, ncdict, delval='DELETE'):
"""
Copy attribute names and values from a dict (or OrderedDict) to a netCDF
object.
Global attributes are keyed in the OrderedDict by the attribute name.
Variable attributes are keyed in the OrderedDict by the variable name and
attribute name separated by a colon, i.e. variable:attribute.
If any value is equal to delval then, if the corresponding attribute exists
in the netCDF object, the corresponding attribute is removed from the
netCDF object. The default value of delval is 'DELETE'. For example,
nc3_set_attributes(self.netcdf_object, {'temperature:missing_value':'DELETE'})
will delete the missing_value attribute from the temperature variable.
A ValueError exception is raised if a key refers to a variable name that
is not defined in the netCDF object.
"""
netcdf_builder.set_attributes(self.netcdf_object, ncdict, delval)
def show_dimensions(self):
"""
Print the dimension names, lengths and whether they are unlimited.
"""
netcdf_builder.show_dimensions(self.netcdf_object)
def set_variable(self, varname, dtype='f4', dims=None, chunksize=None, fill=None, zlib=False, **kwargs):
"""
Define (create) a variable in a netCDF object. No data is written to the
variable yet. Give the variable's dimensions as a tuple of dimension names.
Dimensions must have been previously created with self.netcdf_object.createDimension
(e.g. see set_timelatlon()).
Recommended ordering of dimensions is:
time, height or depth (Z), latitude (Y), longitude (X).
Any other dimensions should be defined before (placed to the left of) the
spatio-temporal coordinates.
To create a scalar variable, use an empty tuple for the dimensions.
Variables can be renamed with the 'renameVariable' method of the netCDF
object.
Specify compression with zlib=True (default = False).
Specify the chunksize with a sequence (tuple, list) of the same length
as dims (i.e., the number of dimensions) where each element of chunksize
corresponds to the size of the chunk along the corresponding dimension.
There are some tips and tricks associated with chunking - see
http://data.auscover.org.au/node/73 for an overview.
The default behaviour is to create a floating-point (f4) variable
with dimensions ('time','latitude','longitude'), with no chunking and
no compression.
"""
netcdf_builder.set_variable(self.netcdf_object, varname, dtype=dtype, dims=dims, chunksize=chunksize, fill=fill, zlib=zlib, **kwargs)
def add_bounds(self, dimension_tag, bounds):
"""Add a bounds array of data to the netCDF object.
Bounds array can be a list, tuple or NumPy array.
A bounds array gives the values of the vertices corresponding to a dimension
variable (see the CF documentation for more information). The dimension
variable requires an attribute called 'bounds', which references a variable
that contains the bounds array. The bounds array has the same shape as the
corresponding dimension with an extra size for the number of vertices.
This function:
- Adds a 'bounds' attribute to the dimension variable if required.
If a bounds attribute exists then its value will be used for the bounds
variable (bndname). Otherwise if a bndname is given then this will be
used. Otherwise the default bndname will be '_bounds' appended to the
dimension name.
- If the bounds variable exists then a ValueError will be raised if its
shape does not match the bounds array.
- If the bounds variable does not exist then it will be created. If so
an extra dimension is required for the number of vertices. Any existing
dimension of the right size will be used. Otherwise a new dimension
will be created. The new dimension's name will be 'nv' (number of
vertices), unless this dimension name is already used in which case
'_nv' appended to the dimension name will be used instead.
- Lastly, the bounds array is written to the bounds variable. If the
corresponding dimension is time (name = 'time' or dim.axis = 't') then
the bounds array will be written as date2num data.
"""
dimension_tag = dimension_tag.upper()
dimension_name=self.storage_config['dimensions'][dimension_tag]['dimension_name']
bounds_name = dimension_name + '_bounds'
netcdf_builder.add_bounds(self.netcdf_object, dimension_name, bounds, bounds_name)
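# A minimal usage sketch for add_bounds (illustrative only; daily bounds on
# the time dimension are assumed, with time_values holding the coordinate
# values in seconds):
#   bounds = [(t, t + 86400) for t in time_values]
#   storage_unit.add_bounds('T', bounds)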
def georeference_from_file(self, gdal_dataset_path):
'''
Function to set georeferencing from template GDAL dataset
'''
def getMinMaxExtents(samples, lines, geoTransform):
"""
Calculates the min/max extents based on the input latitude and longitude vectors.
:param samples:
An integer representing the number of samples (columns) in an array.
:param lines:
An integer representing the number of lines (rows) in an array.
:param geoTransform:
A tuple containing the geotransform information returned by GDAL.
:return:
A tuple containing (min_lat, max_lat, min_lon, max_lon)
:notes:
Hasn't been tested for northern or western hemispheres.
"""
extents = []
x_list = [0,samples]
y_list = [0,lines]
for px in x_list:
for py in y_list:
x = geoTransform[0]+(px*geoTransform[1])+(py*geoTransform[2])
y = geoTransform[3]+(px*geoTransform[4])+(py*geoTransform[5])
extents.append([x,y])
extents = np.array(extents)
min_lat = np.min(extents[:,1])
max_lat = np.max(extents[:,1])
min_lon = np.min(extents[:,0])
max_lon = np.max(extents[:,0])
return (min_lat, max_lat, min_lon, max_lon)
# Start of georeference_from_file(self, gdal_dataset_path) definition
gdal_dataset = gdal.Open(gdal_dataset_path)
assert gdal_dataset, 'Unable to open file %s' % gdal_dataset_path
geotransform = gdal_dataset.GetGeoTransform()
logger.debug('geotransform = %s', geotransform)
projection = gdal_dataset.GetProjection()
logger.debug('projection = %s', projection)
# Set coordinate reference system metadata variable
spatial_reference = osr.SpatialReference()
spatial_reference.ImportFromWkt(projection)
crs_metadata = {'crs:name': spatial_reference.GetAttrValue('geogcs'),
'crs:longitude_of_prime_meridian': 0.0, #TODO: This needs to be fixed!!! An OSR object should have this, but maybe only for specific OSR references??
'crs:inverse_flattening': spatial_reference.GetInvFlattening(),
'crs:semi_major_axis': spatial_reference.GetSemiMajor(),
'crs:semi_minor_axis': spatial_reference.GetSemiMinor(),
}
self.set_variable('crs', dims=(), dtype='i4')
self.set_attributes(crs_metadata)
logger.debug('crs_metadata = %s', crs_metadata)
extents = getMinMaxExtents(gdal_dataset.RasterXSize, gdal_dataset.RasterYSize, geotransform)
#pdb.set_trace()
self.netcdf_object.geospatial_lat_min = extents[0]
self.netcdf_object.geospatial_lat_max = extents[1]
self.netcdf_object.geospatial_lat_units = 'degrees_north'
self.netcdf_object.geospatial_lat_resolution = geotransform[5]
self.netcdf_object.geospatial_lon_min = extents[2]
self.netcdf_object.geospatial_lon_max = extents[3]
self.netcdf_object.geospatial_lon_units = 'degrees_east'
self.netcdf_object.geospatial_lon_resolution = geotransform[1]
def sync(self):
'''
Function to sync file to disk
'''
if self._isopen:
self.netcdf_object.sync()
@property
def isopen(self):
return self._isopen
|
GeoscienceAustralia/gdf
|
gdf/_gdfnetcdf.py
|
Python
|
apache-2.0
| 33,533
|
[
"NetCDF"
] |
df5c3ba71e1121ee42fd6e35a6f67459e1c8589f098394775aef9678c3fd615b
|
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2015-2020 Dominik Kriegner <dominik.kriegner@gmail.com>
import unittest
import xrayutilities as xu
class TestStructureFactor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.at = xu.materials.elements.Dummy
cls.mat = xu.materials.Crystal(
'test', xu.materials.SGLattice('227:1', 4, atoms=[cls.at, ],
pos=['8a', ]))
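# The Dummy atoms on the 8a site of space group 227 form a diamond lattice,
# so the expected values below follow the diamond extinction rules:
# mixed-parity (hkl) are forbidden, all-odd (hkl) give F = 4(1 +/- 1j) per
# unit scattering factor, and all-even (hkl) are allowed only when h+k+l is
# a multiple of 4, giving F = 8.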
def test_StructureFactor(self):
f = self.mat.StructureFactor(self.mat.Q(1, 3, 1))
self.assertAlmostEqual(f, 4 - 4j, places=10)
f = self.mat.StructureFactor(self.mat.Q(0, 4, 0))
self.assertAlmostEqual(f, 8, places=10)
f = self.mat.StructureFactor(self.mat.Q(1, 2, 1))
self.assertAlmostEqual(f, 0, places=10)
def test_StructureFactorQ(self):
q = (self.mat.Q(1, 1, 1), self.mat.Q(0, 4, 0), self.mat.Q(1, 2, 1))
f = self.mat.StructureFactorForQ(q)
for i in range(3):
self.assertAlmostEqual(f[i], (4 + 4j, 8, 0)[i], places=10)
def test_StructureFactorE(self):
q = self.mat.Q(1, 1, 1)
f = self.mat.StructureFactorForEnergy(q, (1000, 2000, 3000))
for i in range(3):
self.assertAlmostEqual(f[i], 4 + 4j, places=10)
if __name__ == '__main__':
unittest.main()
|
dkriegner/xrayutilities
|
tests/test_structure_factor.py
|
Python
|
gpl-2.0
| 1,990
|
[
"CRYSTAL"
] |
20a6bb6b4958a17b0631b81f877d92b5c1927b325a58ea3c504d7b8713a35098
|
# Copyright (C) 2012,2013,2017
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.interaction.VSpherePair
**********************************
This class provides methods to compute forces and energies of
the VSpherePair potential.
.. math::
V(r_{ij}, \sigma_{ij}) = \varepsilon
\left( \frac{2 \pi}{3} \sigma_{ij}\right)^{- \frac{3}{2}}
e^{- \frac{3}{2} \frac{r_{ij}^2}{\sigma_{ij}}} ,
r_{ij} = \left| \vec{r_i} - \vec{r_j} \right| ,
\sigma_{ij} = \sigma_i^2 + \sigma_j^2
Reference: Fluctuating soft-sphere approach to coarse-graining of polymer melts, Soft Matter, 2010, 6, 2282
.. function:: espressopp.interaction.VSpherePair(epsilon, cutoff, shift)
:param epsilon: (default: 1.0)
:param cutoff: (default: infinity)
:param shift: (default: "auto")
:type epsilon: real
:type cutoff:
:type shift:
.. function:: espressopp.interaction.VerletListVSpherePair(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListVSpherePair.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListVSpherePair.getVerletList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.VerletListVSpherePair.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
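Example (a minimal usage sketch; the ``system`` object and the numeric
parameters are assumptions for illustration, not prescribed by this module):
>>> vl = espressopp.VerletList(system, cutoff=2.5)
>>> pot = espressopp.interaction.VSpherePair(epsilon=1.0, cutoff=2.5)
>>> inter = espressopp.interaction.VerletListVSpherePair(vl)
>>> inter.setPotential(type1=0, type2=0, potential=pot)
>>> system.addInteraction(inter)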
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_VSpherePair, interaction_VerletListVSpherePair
class VSpherePairLocal(PotentialLocal, interaction_VSpherePair):
def __init__(self, epsilon=1.0, cutoff=infinity, shift="auto"):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if shift =="auto":
cxxinit(self, interaction_VSpherePair,
epsilon, cutoff)
else:
cxxinit(self, interaction_VSpherePair,
epsilon, cutoff, shift)
class VerletListVSpherePairLocal(InteractionLocal, interaction_VerletListVSpherePair):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListVSpherePair, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
def getVerletList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getVerletList(self)
if pmi.isController:
class VSpherePair(Potential):
'The VSpherePair potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.VSpherePairLocal',
pmiproperty = ['epsilon']
)
class VerletListVSpherePair(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListVSpherePairLocal',
pmicall = ['setPotential', 'getPotential', 'getVerletList']
)
|
espressopp/espressopp
|
src/interaction/VSpherePair.py
|
Python
|
gpl-3.0
| 4,812
|
[
"ESPResSo"
] |
310e9496b97badd78b7efd6164b503cec1573cc178aaf156127a6f1b1c51698f
|
import struct, time, array, os
from math import pi
from Sire.Maths import Vector
from Sire.Mol import *
from Sire.IO import *
#
# Adapted from Peter Eastman's code in OpenMM python API to write a DCD file
#
class DCDFile(object):
"""DCDFile provides methods for creating DCD files.
DCD is a file format for storing simulation trajectories. It is supported by many programs, such
as CHARMM, NAMD, and X-PLOR. Note, however, that different programs produce subtly different
versions of the format. This class generates the CHARMM version. Also note that there is no
standard byte ordering (big-endian or little-endian) for this format. This class always generates
files with little-endian ordering.
To use this class, create a DCDFile object, then call writeModel() once for each model in the file."""
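# A minimal usage sketch (illustrative only; a Sire `system` carrying a
# molecule group and a 'space' property is assumed):
#   from Sire.Units import femtosecond
#   group = system[MGName("all")]
#   space = system.property("space")
#   dcd = DCDFile("traj.dcd", group, space, 2*femtosecond, interval=100)
#   dcd.writeModel(group, space) # call once per saved frame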
def __init__(self, strfile, group, space, dt, firstStep=0, interval=1):
"""Create a DCD file and write out the header.
Parameters:
- file (file) A file to write to
- topology (Topology) The Topology defining the molecular system being written
- dt (time) The time step used in the trajectory
- firstStep (int=0) The index of the first step in the trajectory
- interval (int=1) The frequency (measured in time steps) at which states are written to the trajectory
"""
file = open(strfile,'wb')
#PDB().write(group, "%s.pdb" % strfile)
self._file = file
self._group = group
self._space = space
self._firstStep = firstStep
self._interval = interval
self._modelCount = 0
#if is_quantity(dt):
# dt = dt.value_in_unit(picoseconds)
#dt /= 0.04888821
dt = dt.value()
natoms = 0
molecules = group.molecules()
molnums = molecules.molNums()
for molnum in molnums:
mol = molecules.molecule(molnum)[0].molecule()
nat = mol.nAtoms()
natoms += nat
print("There are %s atoms in the group " % natoms)
#sys.exit(-1)
boxFlag = 0
if space.isPeriodic():
boxFlag = 1
header = struct.pack(b'<i4c9if', 84, b'C', b'O', b'R', b'D', 0, firstStep, interval, 0, 0, 0, 0, 0, 0, dt)
header += struct.pack(b'<13i', boxFlag, 0, 0, 0, 0, 0, 0, 0, 0, 24, 84, 164, 2)
header += struct.pack(b'<80s', b'Created by OpenMM')
header += struct.pack(b'<80s', bytes('Created '+time.asctime(time.localtime(time.time())),"utf-8"))
header += struct.pack(b'<4i', 164, 4, natoms, 4)
file.write( header )
def writeModel(self, group, space):
"""Write out a model to the DCD file.
Parameters:
- positions (list) The list of atomic positions to write
"""
#if len(list(self._topology.atoms())) != len(positions):
# raise ValueError('The number of positions must match the number of atoms')
#if is_quantity(positions):
# positions = positions.value_in_unit(nanometers)
file = self._file
# Update the header.
self._modelCount += 1
file.seek(8, os.SEEK_SET)
file.write(struct.pack('<i', self._modelCount))
file.seek(20, os.SEEK_SET)
file.write(struct.pack('<i', self._firstStep+self._modelCount*self._interval))
# Write the data.
file.seek(0, os.SEEK_END)
if space.isPeriodic():
# PeriodicBox.
try:
boxSize = space.dimensions()
file.write(struct.pack('<i6di', 48, boxSize[0], 0, boxSize[1], 0, 0, boxSize[2], 48))
# TriclinicBox.
except:
v0 = space.vector0()
v1 = space.vector1()
v2 = space.vector2()
rad2deg = 180 / pi
alpha = Vector.angle(v1, v2).value() * rad2deg
beta = Vector.angle(v0, v2).value() * rad2deg
gamma = Vector.angle(v1, v0).value() * rad2deg
file.write(struct.pack('<i6di', 48, v0.magnitude(), gamma, v1.magnitude(), beta, alpha, v2.magnitude(), 48))
natoms = 0
for i in range(0,group.nMolecules()):
mol = group[MolIdx(i)][0].molecule()
nat = mol.nAtoms()
natoms += nat
length = struct.pack('<i', 4*natoms)
# To get the positions...
# Loop over that group
nmols = group.nMolecules()
coords = []
#spacedims = space.dimensions()
#wrapmolcoordinates = False
#wrapatomcoordinates = False
# JM 10/14 bugfix change of behavior of QSet in QT5
molnums = group.molNums()
molnums.sort()
for i in range(0,group.nMolecules()):
#mol = group[MolIdx(i)].molecule()
mol = group[molnums[i]][0].molecule()
#print (mol)
molcoords = mol.property("coordinates")
#if wrapmolcoordinates:
# molcog = CenterOfGeometry(mol).point()
#
# wrapdelta = Vector( int( math.floor( molcog.x() / spacedims.x() ) ) ,\
# int( math.floor( molcog.y() / spacedims.y() ) ) ,\
# int( math.floor( molcog.z() / spacedims.z() ) ) )
#
# if ( wrapdelta[0] != 0 or wrapdelta[1] != 0 or wrapdelta[2] != 0):
# print("Mol %s wrapdelta %s %s %s " % (molnum.toString(), wrapdelta[0], wrapdelta[1], wrapdelta[2]))
# print(spacedims)
# print(molcoords.toVector())
# wrap = Vector( - wrapdelta[0] * spacedims.x() , - wrapdelta[1] * spacedims.y(), -wrapdelta[2] * spacedims.z() )
# molcoords.translate(wrap)
# print(molcoords.toVector())
#molcoords.translate(wrapdelta)
#coords += molcoords
coords += molcoords.toVector()
#if wrapatomcoordinates:
# molvec = molcoords.toVector()
# for atvec in molvec:
# wrapdelta = Vector( int( math.floor( atvec.x() / spacedims.x() ) ) ,\
# int( math.floor( atvec.y() / spacedims.y() ) ) ,\
# int( math.floor( atvec.z() / spacedims.z() ) ) )
# if ( wrapdelta[0] != 0 or wrapdelta[1] != 0 or wrapdelta[2] != 0):
# wrap = Vector( - wrapdelta[0] * spacedims.x() , - wrapdelta[1] * spacedims.y(), -wrapdelta[2] * spacedims.z() )
# atvec = atvec + wrap
# coords += atvec
#print coords
#print len(coords)
# Have to study that bit...
for i in range(3):
file.write(length)
data = array.array('f', (x[i] for x in coords))
data.tofile(file)
file.write(length)
def writeBufferedModels(self, group, dimensions):
"""Write out a collection of snapshots to the DCD file.
Parameters:
- positions (list) The list of atomic positions to write
"""
#if len(list(self._topology.atoms())) != len(positions):
# raise ValueError('The number of positions must match the number of atoms')
#if is_quantity(positions):
# positions = positions.value_in_unit(nanometers)
file = self._file
# Find the number of buffered frames we have by inspecting the first molecule in the group
# assuming all molecules have same number of buffered coordinates...
mol = group.first()[0].molecule()
molprops = mol.propertyKeys()
nbuf = 0
for molprop in molprops:
if molprop.startswith("buffered_coord"):
nbuf += 1
if nbuf <= 0:
print("Could not find any buffered coordinates in the passed group ! ")
return
#
# Should be more efficient to loop over all mols once
#
for x in range(0,nbuf):
# Update the header
self._modelCount += 1
file.seek(8, os.SEEK_SET)
file.write(struct.pack('<i', self._modelCount))
file.seek(20, os.SEEK_SET)
file.write(struct.pack('<i', self._firstStep+self._modelCount*self._interval))
# Write the data.
file.seek(0, os.SEEK_END)
# Get buffered space...
boxSize = None
if ("buffered_space_%s" % x) in dimensions:
# PeriodicBox.
try:
boxSize = dimensions["buffered_space_%s" % x].dimensions()
#print "buffered_space_%s" % x, boxSize
if boxSize is not None:
file.write(struct.pack('<i6di', 48, boxSize[0], 0, boxSize[1], 0, 0, boxSize[2], 48))
# TriclinicBox.
except:
v0 = dimensions["buffered_space_%s" % x].vector0()
v1 = dimensions["buffered_space_%s" % x].vector1()
v2 = dimensions["buffered_space_%s" % x].vector2()
rad2deg = 180 / pi
alpha = Vector.angle(v1, v2).value() * rad2deg
beta = Vector.angle(v0, v2).value() * rad2deg
gamma = Vector.angle(v1, v0).value() * rad2deg
file.write(struct.pack('<i6di', 48, v0.magnitude(), gamma, v1.magnitude(), beta, alpha, v2.magnitude(), 48))
natoms = 0
for i in range(0,group.nMolecules()):
mol = group[MolIdx(i)][0].molecule()
nat = mol.nAtoms()
natoms += nat
length = struct.pack('<i', 4*natoms)
# To get the positions...
# Loop over that group
nmols = group.nMolecules()
coords = []
# JM 10/14 bugfix change of behavior of QSet in QT5
molnums = group.molNums()
molnums.sort()
for i in range(0,group.nMolecules()):
#mol = group[MolIdx(i)].molecule()
mol = group[molnums[i]][0]
molcoords = mol.property("buffered_coord_%s" % x)
coords += molcoords.toVector()
# Have to study that bit...
for i in range(3):
file.write(length)
data = array.array('f', (x[i] for x in coords))
data.tofile(file)
file.write(length)
#rewind
file.seek(0, os.SEEK_SET)
|
michellab/Sire
|
wrapper/Tools/DCDFile.py
|
Python
|
gpl-2.0
| 10,606
|
[
"CHARMM",
"NAMD",
"OpenMM"
] |
2d0901d78d04d07e78ef0f3627782e612692e13536f2eb3492e2bccb063da109
|
# Copyright 2004-2015 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy.display
from renpy.display.render import render, Render, Matrix2D
# This file contains displayables that are image-like, because they take
# up a rectangular area of the screen, and do not respond to input.
class Solid(renpy.display.core.Displayable):
"""
:doc: disp_imagelike
A displayable that fills the area its assigned with `color`.
::
image white = Solid("#fff")
"""
def __init__(self, color, **properties):
super(Solid, self).__init__(**properties)
if color is not None:
self.color = renpy.easy.color(color)
else:
self.color = None
def __hash__(self):
return hash(self.color)
def __eq__(self, o):
if not self._equals(o):
return False
return (self.color == o.color)
def visit(self):
return [ ]
def render(self, width, height, st, at):
width = max(self.style.xminimum, width)
height = max(self.style.yminimum, height)
color = self.color or self.style.color
rv = Render(width, height)
if color is None or width <= 0 or height <= 0:
return rv
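# Render at most a SIZE x SIZE solid texture and stretch it to the full area
# with the forward/reverse matrices below; this avoids allocating a
# full-size texture for large solid fills.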
SIZE = 10
if width < SIZE or height < SIZE:
tex = renpy.display.draw.solid_texture(width, height, color)
else:
tex = renpy.display.draw.solid_texture(SIZE, SIZE, color)
rv.forward = Matrix2D(1.0 * SIZE / width, 0, 0, 1.0 * SIZE / height)
rv.reverse = Matrix2D(1.0 * width / SIZE, 0, 0, 1.0 * height / SIZE)
rv.blit(tex, (0, 0))
return rv
class Frame(renpy.display.core.Displayable):
"""
:doc: disp_imagelike
:args: (image, left, top, right=None, bottom=None, tile=False, **properties)
A displayable that resizes an image to fill the available area,
while preserving the width and height of its borders. It is often
used as the background of a window or button.
.. figure:: frame_example.png
Using a frame to resize an image to double its size.
`image`
An image manipulator that will be resized by this frame.
`left`
The size of the border on the left side.
`top`
The size of the border on the top.
`right`
The size of the border on the right side. If None, defaults
to `left`.
`bottom`
The side of the border on the bottom. If None, defaults to `top`.
`tile`
If true, tiling is used to resize sections of the image,
rather than scaling.
::
# Resize the background of the text window if it's too small.
init python:
style.window.background = Frame("frame.png", 10, 10)
"""
__version__ = 1
def after_upgrade(self, version):
if version < 2:
self.left = self.xborder
self.right = self.xborder
self.top = self.yborder
self.bottom = self.yborder
def __init__(self, image, left=None, top=None, right=None, bottom=None, xborder=None, yborder=None, bilinear=True, tile=False, **properties):
super(Frame, self).__init__(**properties)
self.image = renpy.easy.displayable(image)
self.tile = tile
# Compat for old argument names.
if left is None:
left = xborder
if top is None:
top = yborder
if right is None:
right = left
if bottom is None:
bottom = top
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def __eq__(self, o):
if not self._equals(o):
return False
if self.image != o.image:
return False
if self.left != o.left:
return False
if self.top != o.top:
return False
if self.right != o.right:
return False
if self.bottom != o.bottom:
return False
if self.tile != o.tile:
return False
return True
def render(self, width, height, st, at):
width = max(self.style.xminimum, width)
height = max(self.style.yminimum, height)
crend = render(self.image, width, height, st, at)
sw, sh = crend.get_size()
sw = int(sw)
sh = int(sh)
dw = int(width)
dh = int(height)
bw = self.left + self.right
bh = self.top + self.bottom
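# Clamp the total border sizes so they never exceed the source image
# (leaving at least a 2-pixel middle strip) or the destination area, then
# split the clamped total back into left/right (top/bottom) shares
# proportionally.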
xborder = min(bw, sw - 2, dw)
if xborder:
left = self.left * xborder / bw
right = self.right * xborder / bw
else:
left = 0
right = 0
yborder = min(bh, sh - 2, dh)
if yborder:
top = self.top * yborder / bh
bottom = self.bottom * yborder / bh
else:
top = 0
bottom = 0
if renpy.display.draw.info["renderer"] == "sw":
return self.sw_render(crend, dw, dh, left, top, right, bottom)
def draw(x0, x1, y0, y1):
# Compute the coordinates of the left, right, top, and
# bottom sides of the region, for both the source and
# destination surfaces.
# left side.
if x0 >= 0:
dx0 = x0
sx0 = x0
else:
dx0 = dw + x0
sx0 = sw + x0
# right side.
if x1 > 0:
dx1 = x1
sx1 = x1
else:
dx1 = dw + x1
sx1 = sw + x1
# top side.
if y0 >= 0:
dy0 = y0
sy0 = y0
else:
dy0 = dh + y0
sy0 = sh + y0
# bottom side
if y1 > 0:
dy1 = y1
sy1 = y1
else:
dy1 = dh + y1
sy1 = sh + y1
# Quick exit.
if sx0 == sx1 or sy0 == sy1:
return
# Compute sizes.
csw = sx1 - sx0
csh = sy1 - sy0
cdw = dx1 - dx0
cdh = dy1 - dy0
if csw <= 0 or csh <= 0 or cdh <= 0 or cdw <= 0:
return
# Get a subsurface.
cr = crend.subsurface((sx0, sy0, csw, csh))
# Scale or tile if we have to.
if csw != cdw or csh != cdh:
if self.tile:
newcr = Render(cdw, cdh)
newcr.clipping = True
for x in xrange(0, cdw, csw):
for y in xrange(0, cdh, csh):
newcr.blit(cr, (x, y))
cr = newcr
else:
newcr = Render(cdw, cdh)
newcr.forward = Matrix2D(1.0 * csw / cdw, 0, 0, 1.0 * csh / cdh)
newcr.reverse = Matrix2D(1.0 * cdw / csw, 0, 0, 1.0 * cdh / csh)
newcr.blit(cr, (0, 0))
cr = newcr
# Blit.
rv.blit(cr, (dx0, dy0))
return
rv = Render(dw, dh)
self.draw_pattern(draw, left, top, right, bottom)
return rv
def draw_pattern(self, draw, left, top, right, bottom):
# Top row.
if top:
if left:
draw(0, left, 0, top)
draw(left, -right, 0, top)
if right:
draw(-right, 0, 0, top)
# Middle row.
if left:
draw(0, left, top, -bottom)
draw(left, -right, top, -bottom)
if right:
draw(-right, 0, top, -bottom)
# Bottom row.
if bottom:
if left:
draw(0, left, -bottom, 0)
draw(left, -right, -bottom, 0)
if right:
draw(-right, 0, -bottom, 0)
def sw_render(self, crend, dw, dh, left, top, right, bottom):
source = crend.render_to_texture(True)
sw, sh = source.get_size()
dest = renpy.display.swdraw.surface(dw, dh, True)
rv = dest
def draw(x0, x1, y0, y1):
# Compute the coordinates of the left, right, top, and
# bottom sides of the region, for both the source and
# destination surfaces.
# left side.
if x0 >= 0:
dx0 = x0
sx0 = x0
else:
dx0 = dw + x0
sx0 = sw + x0
# right side.
if x1 > 0:
dx1 = x1
sx1 = x1
else:
dx1 = dw + x1
sx1 = sw + x1
# top side.
if y0 >= 0:
dy0 = y0
sy0 = y0
else:
dy0 = dh + y0
sy0 = sh + y0
# bottom side
if y1 > 0:
dy1 = y1
sy1 = y1
else:
dy1 = dh + y1
sy1 = sh + y1
# Quick exit.
if sx0 == sx1 or sy0 == sy1 or dx1 <= dx0 or dy1 <= dy0:
return
# Compute sizes.
srcsize = (sx1 - sx0, sy1 - sy0)
dstsize = (int(dx1 - dx0), int(dy1 - dy0))
# Get a subsurface.
surf = source.subsurface((sx0, sy0, srcsize[0], srcsize[1]))
# Scale or tile if we have to.
if dstsize != srcsize:
if self.tile:
tilew, tileh = srcsize
dstw, dsth = dstsize
surf2 = renpy.display.pgrender.surface_unscaled(dstsize, surf)
for y in range(0, dsth, tileh):
for x in range(0, dstw, tilew):
surf2.blit(surf, (x, y))
surf = surf2
else:
surf2 = renpy.display.scale.real_transform_scale(surf, dstsize)
surf = surf2
# Blit.
dest.blit(surf, (dx0, dy0))
self.draw_pattern(draw, left, top, right, bottom)
rrv = renpy.display.render.Render(dw, dh)
rrv.blit(rv, (0, 0))
rrv.depends_on(crend)
# And, finish up.
return rrv
def visit(self):
return [ self.image ]
class FileCurrentScreenshot(renpy.display.core.Displayable):
"""
:doc: file_action_function
A displayable that shows the screenshot that will be saved with the current
file, if a screenshot has been taken when entering a menu or with
:func:`FileTakeScreenshot`.
If there is no current screenshot, `empty` is shown in its place. (If `empty` is
None, it defaults to :func:`Null`.)
"""
def __init__(self, empty=None, **properties):
super(FileCurrentScreenshot, self).__init__(**properties)
if empty is None:
empty = renpy.display.layout.Null()
self.empty = empty
def render(self, width, height, st, at):
ss = renpy.display.interface.screenshot_surface
if ss is None:
return renpy.display.render.render(self.empty, width, height, st, at)
tex = renpy.display.draw.load_texture(ss)
w, h = tex.get_size()
rv = renpy.display.render.Render(w, h)
rv.blit(tex, (0, 0))
return rv
|
joxer/Baka-No-Voltron
|
tmp/android.dist/private/renpy/display/imagelike.py
|
Python
|
gpl-2.0
| 12,406
|
[
"VisIt"
] |
e7572624eef10f6d49d08c2f773d0172db971e18d0e42cb5a957afd6b0280c92
|
"""
Tutorial - Object inheritance
You are free to derive your request handler classes from any base
class you wish. In most real-world applications, you will probably
want to create a central base class used for all your pages, which takes
care of things like printing a common page header and footer.
"""
import os.path
import cherrypy
class Page:
# Store the page title in a class attribute
title = 'Untitled Page'
def header(self):
return '''
<html>
<head>
<title>%s</title>
</head>
<body>
<h2>%s</h2>
''' % (self.title, self.title)
def footer(self):
return '''
</body>
</html>
'''
# Note that header and footer don't get their exposed attributes
# set to True. This isn't necessary since the user isn't supposed
# to call header or footer directly; instead, we'll call them from
# within the actually exposed handler methods defined in this
# class' subclasses.
class HomePage(Page):
# Different title for this page
title = 'Tutorial 5'
def __init__(self):
# create a subpage
self.another = AnotherPage()
@cherrypy.expose
def index(self):
# Note that we call the header and footer methods inherited
# from the Page class!
return self.header() + '''
<p>
Isn't this exciting? There's
<a href="./another/">another page</a>, too!
</p>
''' + self.footer()
class AnotherPage(Page):
title = 'Another Page'
@cherrypy.expose
def index(self):
return self.header() + '''
<p>
And this is the amazing second page!
</p>
''' + self.footer()
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HomePage().index().
cherrypy.quickstart(HomePage(), config=tutconf)
|
JonnyWong16/plexpy
|
lib/cherrypy/tutorial/tut05_derived_objects.py
|
Python
|
gpl-3.0
| 2,141
|
[
"exciting"
] |
bb42c19d44d6f039dec4402da30849d2c8de02aa7aad2d3ae533364177c36352
|
""" Encapsulate here the logic for matching jobs
Utilities and classes here are used by MatcherHandler
"""
__RCSID__ = "$Id$"
import time
from DIRAC import gLogger
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.Core.Security import Properties
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.Client.Limiter import Limiter
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB, singleValueDefFields, multiValueMatchFields
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
class Matcher( object ):
""" Logic for matching
"""
def __init__( self, pilotAgentsDB = None, jobDB = None, tqDB = None, jlDB = None, opsHelper = None ):
""" c'tor
"""
if pilotAgentsDB:
self.pilotAgentsDB = pilotAgentsDB
else:
self.pilotAgentsDB = PilotAgentsDB()
if jobDB:
self.jobDB = jobDB
else:
self.jobDB = JobDB()
if tqDB:
self.tqDB = tqDB
else:
self.tqDB = TaskQueueDB()
if jlDB:
self.jlDB = jlDB
else:
self.jlDB = JobLoggingDB()
if opsHelper:
self.opsHelper = opsHelper
else:
self.opsHelper = Operations()
self.log = gLogger.getSubLogger( "Matcher" )
self.limiter = Limiter( jobDB = self.jobDB, opsHelper = self.opsHelper )
def selectJob( self, resourceDescription, credDict ):
""" Main job selection function to find the highest priority job matching the resource capacity
"""
startTime = time.time()
resourceDict = self._getResourceDict( resourceDescription, credDict )
negativeCond = self.limiter.getNegativeCondForSite( resourceDict['Site'] )
result = self.tqDB.matchAndGetJob( resourceDict, negativeCond = negativeCond )
if not result['OK']:
raise RuntimeError( result['Message'] )
result = result['Value']
if not result['matchFound']:
self.log.info( "No match found" )
return {}
jobID = result['jobId']
resAtt = self.jobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup', 'Status'] )
if not resAtt['OK']:
raise RuntimeError( 'Could not retrieve job attributes' )
if not resAtt['Value']:
raise RuntimeError( "No attributes returned for job" )
if not resAtt['Value']['Status'] == 'Waiting':
self.log.error( 'Job matched by the TQ is not in Waiting state', str( jobID ) )
result = self.tqDB.deleteJob( jobID )
if not result[ 'OK' ]:
raise RuntimeError( result['Message'] )
raise RuntimeError( "Job %s is not in Waiting state" % str( jobID ) )
self._reportStatus( resourceDict, jobID )
result = self.jobDB.getJobJDL( jobID )
if not result['OK']:
raise RuntimeError( "Failed to get the job JDL" )
resultDict = {}
resultDict['JDL'] = result['Value']
resultDict['JobID'] = jobID
matchTime = time.time() - startTime
self.log.info( "Match time: [%s]" % str( matchTime ) )
gMonitor.addMark( "matchTime", matchTime )
# Get some extra stuff into the response returned
resOpt = self.jobDB.getJobOptParameters( jobID )
if resOpt['OK']:
for key, value in resOpt['Value'].items():
resultDict[key] = value
resAtt = self.jobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup'] )
if not resAtt['OK']:
raise RuntimeError( 'Could not retrieve job attributes' )
if not resAtt['Value']:
raise RuntimeError( 'No attributes returned for job' )
if self.opsHelper.getValue( "JobScheduling/CheckMatchingDelay", True ):
self.limiter.updateDelayCounters( resourceDict['Site'], jobID )
pilotInfoReportedFlag = resourceDict.get( 'PilotInfoReportedFlag', False )
if not pilotInfoReportedFlag:
self._updatePilotInfo( resourceDict )
self._updatePilotJobMapping( resourceDict, jobID )
resultDict['DN'] = resAtt['Value']['OwnerDN']
resultDict['Group'] = resAtt['Value']['OwnerGroup']
resultDict['PilotInfoReportedFlag'] = True
return resultDict
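# A minimal usage sketch (illustrative only; the resource description below
# is a guessed pilot ceDict, and credDict is whatever credentials dictionary
# the calling service extracted for the pilot):
#   matcher = Matcher()
#   resourceDescription = { 'Site': 'LCG.CERN.ch', 'CPUTime': 86400 }
#   result = matcher.selectJob( resourceDescription, credDict )
#   # result is {} when nothing matches, otherwise it carries 'JDL',
#   # 'JobID', 'DN', 'Group', ...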
def _getResourceDict( self, resourceDescription, credDict ):
""" from resourceDescription to resourceDict (just various mods)
"""
resourceDict = self._processResourceDescription( resourceDescription )
resourceDict = self._checkCredentials( resourceDict, credDict )
self._checkPilotVersion( resourceDict )
if not self._checkMask( resourceDict ):
# Banned destinations can only take Test jobs
resourceDict['JobType'] = 'Test'
self.log.verbose( "Resource description:" )
for key in resourceDict:
self.log.verbose( "%s : %s" % ( key.rjust( 20 ), resourceDict[ key ] ) )
return resourceDict
def _processResourceDescription( self, resourceDescription ):
""" Check and form the resource description dictionary
resourceDescription is a ceDict coming from a JobAgent, for example.
"""
resourceDict = {}
if isinstance( resourceDescription, basestring ):
classAdAgent = ClassAd( resourceDescription )
if not classAdAgent.isOK():
raise ValueError( 'Illegal Resource JDL' )
self.log.verbose( classAdAgent.asJDL() )
for name in singleValueDefFields:
if classAdAgent.lookupAttribute( name ):
if name == 'CPUTime':
resourceDict[name] = classAdAgent.getAttributeInt( name )
else:
resourceDict[name] = classAdAgent.getAttributeString( name )
for name in multiValueMatchFields:
if classAdAgent.lookupAttribute( name ):
if name == 'SubmitPool':
resourceDict[name] = classAdAgent.getListFromExpression( name )
else:
resourceDict[name] = classAdAgent.getAttributeString( name )
# Check if a JobID is requested
if classAdAgent.lookupAttribute( 'JobID' ):
resourceDict['JobID'] = classAdAgent.getAttributeInt( 'JobID' )
for k in ( 'DIRACVersion', 'ReleaseVersion', 'ReleaseProject', 'VirtualOrganization' ):
if classAdAgent.lookupAttribute( k ):
resourceDict[ k ] = classAdAgent.getAttributeString( k )
else:
for name in singleValueDefFields:
if resourceDescription.has_key( name ):
resourceDict[name] = resourceDescription[name]
for name in multiValueMatchFields:
if resourceDescription.has_key( name ):
resourceDict[name] = resourceDescription[name]
if 'JobID' in resourceDescription:
resourceDict['JobID'] = resourceDescription['JobID']
# Convert MaxRAM and NumberOfCores parameters into a list of tags
maxRAM = resourceDescription.get( 'MaxRAM' )
nCores = resourceDescription.get( 'NumberOfProcessors' )
for param, key in [ ( maxRAM, 'GB' ), ( nCores, 'Cores' ) ]:
if param:
try:
intValue = int( param )/1000
if intValue <= 128:
paramList = range( 1, intValue + 1 )
paramTags = [ '%d%s' % ( par, key ) for par in paramList ]
resourceDict.setdefault( "Tag", [] ).extend( paramTags )
except ValueError:
pass
if 'Tag' in resourceDict:
resourceDict['Tag'] = list( set( resourceDict['Tag'] ) )
for k in ( 'DIRACVersion', 'ReleaseVersion', 'ReleaseProject', 'VirtualOrganization',
'PilotReference', 'PilotBenchmark', 'PilotInfoReportedFlag' ):
if k in resourceDescription:
resourceDict[ k ] = resourceDescription[ k ]
return resourceDict
def _reportStatus( self, resourceDict, jobID ):
""" Reports the status of the matched job in jobDB and jobLoggingDB
Do not fail if errors happen here
"""
attNames = ['Status', 'MinorStatus', 'ApplicationStatus', 'Site']
attValues = ['Matched', 'Assigned', 'Unknown', resourceDict['Site']]
result = self.jobDB.setJobAttributes( jobID, attNames, attValues )
if not result['OK']:
self.log.error( "Problem reporting job status", "setJobAttributes, jobID = %s: %s" % ( jobID, result['Message'] ) )
else:
self.log.verbose( "Set job attributes for jobID %s" % jobID )
result = self.jlDB.addLoggingRecord( jobID,
status = 'Matched',
minor = 'Assigned',
source = 'Matcher' )
if not result['OK']:
self.log.error( "Problem reporting job status", "addLoggingRecord, jobID = %s: %s" % ( jobID, result['Message'] ) )
else:
self.log.verbose( "Added logging record for jobID %s" % jobID )
def _checkMask( self, resourceDict ):
""" Check the mask: are we allowed to run normal jobs?
FIXME: should we move to site OR SE?
"""
if not 'Site' in resourceDict:
self.log.error( "Missing Site Name in Resource JDL" )
raise RuntimeError( "Missing Site Name in Resource JDL" )
# Get common site mask and check the agent site
result = self.jobDB.getSiteMask( siteState = 'Active' )
if not result['OK']:
self.log.error( "Internal error", "getSiteMask: %s" % result['Message'] )
raise RuntimeError( "Internal error" )
maskList = result['Value']
if resourceDict['Site'] not in maskList:
return False
return True
def _updatePilotInfo( self, resourceDict ):
""" Update pilot information - do not fail if we don't manage to do it
"""
pilotReference = resourceDict.get( 'PilotReference', '' )
if pilotReference:
gridCE = resourceDict.get( 'GridCE', 'Unknown' )
site = resourceDict.get( 'Site', 'Unknown' )
benchmark = resourceDict.get( 'PilotBenchmark', 0.0 )
self.log.verbose( 'Reporting pilot info for %s: gridCE=%s, site=%s, benchmark=%f' % ( pilotReference, gridCE, site, benchmark ) )
result = self.pilotAgentsDB.setPilotStatus( pilotReference, status = 'Running', gridSite = site,
destination = gridCE, benchmark = benchmark )
if not result['OK']:
self.log.error( "Problem updating pilot information",
"; setPilotStatus. pilotReference: %s; %s" % ( pilotReference, result['Message'] ) )
def _updatePilotJobMapping( self, resourceDict, jobID ):
""" Update pilot to job mapping information
"""
pilotReference = resourceDict.get( 'PilotReference', '' )
if pilotReference:
result = self.pilotAgentsDB.setCurrentJobID( pilotReference, jobID )
if not result['OK']:
self.log.error( "Problem updating pilot information",
";setCurrentJobID. pilotReference: %s; %s" % ( pilotReference, result['Message'] ) )
result = self.pilotAgentsDB.setJobForPilot( jobID, pilotReference, updateStatus = False )
if not result['OK']:
self.log.error( "Problem updating pilot information",
"; setJobForPilot. pilotReference: %s; %s" % ( pilotReference, result['Message'] ) )
def _checkCredentials( self, resourceDict, credDict ):
""" Check if we can get a job given the passed credentials
"""
if Properties.GENERIC_PILOT in credDict[ 'properties' ]:
# You can only match groups in the same VO
if credDict[ 'group' ] == "hosts":
# for the host case the VirtualOrganization parameter
# is mandatory in resourceDict
vo = resourceDict.get( 'VirtualOrganization', '' )
else:
vo = Registry.getVOForGroup( credDict[ 'group' ] )
result = Registry.getGroupsForVO( vo )
if result[ 'OK' ]:
resourceDict[ 'OwnerGroup' ] = result[ 'Value' ]
else:
self.log.info( "host matching, matching only resource credentials: %s" % resourceDict )
else:
# If it's a private pilot, the DN has to be the same
if Properties.PILOT in credDict[ 'properties' ]:
self.log.notice( "Setting the resource DN to the credentials DN" )
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
# If it's a job sharing. The group has to be the same and just check that the DN (if any)
# belongs to the same group
elif Properties.JOB_SHARING in credDict[ 'properties' ]:
resourceDict[ 'OwnerGroup' ] = credDict[ 'group' ]
self.log.notice( "Setting the resource group to the credentials group" )
if 'OwnerDN' in resourceDict and resourceDict[ 'OwnerDN' ] != credDict[ 'DN' ]:
ownerDN = resourceDict[ 'OwnerDN' ]
result = Registry.getGroupsForDN( resourceDict[ 'OwnerDN' ] )
if not result[ 'OK' ]:
raise RuntimeError( result['Message'] )
if credDict[ 'group' ] not in result[ 'Value' ]:
# DN is not in the same group! bad boy.
self.log.notice( "You cannot request jobs from DN %s. It does not belong to your group!" % ownerDN )
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
# Nothing special, group and DN have to be the same
else:
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
resourceDict[ 'OwnerGroup' ] = credDict[ 'group' ]
return resourceDict
def _checkPilotVersion( self, resourceDict ):
""" Check the pilot DIRAC version
"""
if self.opsHelper.getValue( "Pilot/CheckVersion", True ):
if 'ReleaseVersion' not in resourceDict:
if not 'DIRACVersion' in resourceDict:
raise RuntimeError( 'Version check requested and not provided by Pilot' )
else:
pilotVersion = resourceDict['DIRACVersion']
else:
pilotVersion = resourceDict['ReleaseVersion']
validVersions = self.opsHelper.getValue( "Pilot/Version", [] )
if validVersions and pilotVersion not in validVersions:
raise RuntimeError( 'Pilot version does not match the production version %s not in ( %s )' % \
( pilotVersion, ",".join( validVersions ) ) )
# Check project if requested
validProject = self.opsHelper.getValue( "Pilot/Project", "" )
if validProject:
if 'ReleaseProject' not in resourceDict:
raise RuntimeError( "Version check requested but expected project %s not received" % validProject )
if resourceDict[ 'ReleaseProject' ] != validProject:
raise RuntimeError( "Version check requested but expected project %s != received %s" % ( validProject,
resourceDict[ 'ReleaseProject' ] ) )
|
vmendez/DIRAC
|
WorkloadManagementSystem/Client/Matcher.py
|
Python
|
gpl-3.0
| 14,676
|
[
"DIRAC"
] |
8a8a9cbf8e9b9d2db90a20e9da09988d6ec6d7767290a59deb5f4d427eea3250
|
"""Generated message classes for testing version v1.
Allows developers to run automated tests for their mobile applications on
Google infrastructure.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudsdk.third_party.apitools.base.protorpclite import messages as _messages
from googlecloudsdk.third_party.apitools.base.py import encoding
package = 'testing'
class AndroidDevice(_messages.Message):
"""A single Android device.
Fields:
androidModelId: The id of the Android device to be used. Use the
EnvironmentDiscoveryService to get supported options. Required
androidVersionId: The id of the Android OS version to be used. Use the
EnvironmentDiscoveryService to get supported options. Required
locale: The locale the test device used for testing. Use the
EnvironmentDiscoveryService to get supported options. Required
orientation: How the device is oriented during the test. Use the
EnvironmentDiscoveryService to get supported options. Required
"""
androidModelId = _messages.StringField(1)
androidVersionId = _messages.StringField(2)
locale = _messages.StringField(3)
orientation = _messages.StringField(4)
class AndroidDeviceCatalog(_messages.Message):
"""The currently supported Android devices.
Fields:
models: The set of supported Android device models. @OutputOnly
runtimeConfiguration: The set of supported runtime configurations.
@OutputOnly
versions: The set of supported Android OS versions. @OutputOnly
"""
models = _messages.MessageField('AndroidModel', 1, repeated=True)
runtimeConfiguration = _messages.MessageField('AndroidRuntimeConfiguration', 2)
versions = _messages.MessageField('AndroidVersion', 3, repeated=True)
class AndroidInstrumentationTest(_messages.Message):
"""A test of an Android application that can control an Android component
independently of its normal lifecycle. Android instrumentation tests run an
application APK and test APK inside the same process on a virtual or
physical AndroidDevice. They also specify a test runner class, such as
com.google.GoogleTestRunner, which can vary on the specific instrumentation
framework chosen. See
<http://developer.android.com/tools/testing/testing_android.html> for more
information on types of Android tests.
Fields:
appApk: The APK for the application under test. Required
appPackageId: The java package for the application under test. Optional,
default is determined by examining the application's manifest.
testApk: The APK containing the test code to be executed. Required
testPackageId: The java package for the test to be executed. Optional,
default is determined by examining the application's manifest.
testRunnerClass: The InstrumentationTestRunner class. Optional, default is
determined by examining the application's manifest.
testTargets: Each target must be fully qualified with the package name or
class name, in one of these formats: - "package package_name" - "class
package_name.class_name" - "class package_name.class_name#method_name"
If empty, all targets in the module will be run.
"""
appApk = _messages.MessageField('FileReference', 1)
appPackageId = _messages.StringField(2)
testApk = _messages.MessageField('FileReference', 3)
testPackageId = _messages.StringField(4)
testRunnerClass = _messages.StringField(5)
testTargets = _messages.StringField(6, repeated=True)
class AndroidMatrix(_messages.Message):
"""A set of Android device configuration permutations is defined by the the
cross-product of the given axes. Internally, the given AndroidMatrix will
be expanded into a set of AndroidDevices. Only supported permutations will
be instantiated. Invalid permutations (e.g., incompatible models/versions)
are ignored.
Fields:
androidModelIds: The ids of the set of Android device to be used. Use the
EnvironmentDiscoveryService to get supported options. Required
androidVersionIds: The ids of the set of Android OS version to be used.
Use the EnvironmentDiscoveryService to get supported options. Required
locales: The set of locales the test device will enable for testing. Use
the EnvironmentDiscoveryService to get supported options. Required
orientations: The set of orientations to test with. Use the
EnvironmentDiscoveryService to get supported options. Required
"""
androidModelIds = _messages.StringField(1, repeated=True)
androidVersionIds = _messages.StringField(2, repeated=True)
locales = _messages.StringField(3, repeated=True)
orientations = _messages.StringField(4, repeated=True)
class AndroidModel(_messages.Message):
"""A description of an Android device tests may be run on.
Enums:
FormValueValuesEnum: Whether this device is virtual or physical.
@OutputOnly
Fields:
brand: The company that this device is branded with. Example: "Google",
"Samsung" @OutputOnly
codename: The name of the industrial design. This corresponds to
android.os.Build.DEVICE @OutputOnly
form: Whether this device is virtual or physical. @OutputOnly
id: The unique opaque id for this model. Use this for invoking the
TestExecutionService. @OutputOnly
manufacturer: The manufacturer of this device. @OutputOnly
name: The human-readable marketing name for this device model. Examples:
"Nexus 5", "Galaxy S5" @OutputOnly
screenX: Screen size in the horizontal (X) dimension measured in pixels.
@OutputOnly
screenY: Screen size in the vertical (Y) dimension measured in pixels.
@OutputOnly
supportedAbis: The list of supported ABIs for this device. This
corresponds to either android.os.Build.SUPPORTED_ABIS (for API level 21
and above) or android.os.Build.CPU_ABI/CPU_ABI2. @OutputOnly
supportedVersionIds: The set of Android versions this device supports.
@OutputOnly
tags: Tags for this dimension. Examples: "default", "preview",
"deprecated"
"""
class FormValueValuesEnum(_messages.Enum):
"""Whether this device is virtual or physical. @OutputOnly
Values:
DEVICE_FORM_UNSPECIFIED: Do not use. For proto versioning only.
VIRTUAL: A software stack that simulates the device
PHYSICAL: Actual hardware
"""
DEVICE_FORM_UNSPECIFIED = 0
VIRTUAL = 1
PHYSICAL = 2
brand = _messages.StringField(1)
codename = _messages.StringField(2)
form = _messages.EnumField('FormValueValuesEnum', 3)
id = _messages.StringField(4)
manufacturer = _messages.StringField(5)
name = _messages.StringField(6)
screenX = _messages.IntegerField(7, variant=_messages.Variant.INT32)
screenY = _messages.IntegerField(8, variant=_messages.Variant.INT32)
supportedAbis = _messages.StringField(9, repeated=True)
supportedVersionIds = _messages.StringField(10, repeated=True)
tags = _messages.StringField(11, repeated=True)
class AndroidMonkeyTest(_messages.Message):
"""A test of an Android application that uses the UI/Application Exerciser
Monkey from the Android SDK. (Not to be confused with the "monkeyrunner"
tool, which is also included in the SDK.) See
http://developer.android.com/tools/help/monkey.html for details.
Fields:
appApk: The APK for the application under test. Required
appPackageId: The java package for the application under test. Optional,
default is determined by examining the application's manifest.
eventCount: Number of random monkey events (e.g. clicks, touches) to
generate. Defaults to 2000.
eventDelay: Fixed delay between events. Defaults to 10ms.
randomSeed: Seed value for pseudo-random number generator. Note that,
although specifying a seed causes the monkey to generate the same
sequence of events, it does not guarantee that a particular outcome will
be reproducible across runs. Optional
"""
appApk = _messages.MessageField('FileReference', 1)
appPackageId = _messages.StringField(2)
eventCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
eventDelay = _messages.StringField(4)
randomSeed = _messages.IntegerField(5, variant=_messages.Variant.INT32)
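# A minimal sketch of a monkey-test message, assuming a hypothetical APK
# path in GCS. The eventDelay string follows the "10ms" default described
# above; FileReference is defined later in this module and resolves at
# call time.
def _example_monkey_test():
  return AndroidMonkeyTest(
      appApk=FileReference(gcsPath='gs://my-bucket/app-debug.apk'),  # hypothetical
      eventCount=2000,
      eventDelay='10ms',
      randomSeed=42)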
class AndroidRoboTest(_messages.Message):
"""A test of an android application that explores the application on a
virtual or physical Android Device, finding culprits and crashes as it goes.
Fields:
appApk: The APK for the application under test. Required
appInitialActivity: The initial activity that should be used to start the
app. Optional
appPackageId: The java package for the application under test. Optional,
default is determined by examining the application's manifest.
maxDepth: The max depth of the traversal stack Robo can explore. Needs to
be at least 2 to make Robo explore the app beyond the first activity.
Default is 50. Optional
maxSteps: The max number of steps Robo can execute. Default is no limit.
Optional
randomizeSteps: Whether Robo follows a random order of steps on a given
activity state. Optional
"""
appApk = _messages.MessageField('FileReference', 1)
appInitialActivity = _messages.StringField(2)
appPackageId = _messages.StringField(3)
maxDepth = _messages.IntegerField(4, variant=_messages.Variant.INT32)
maxSteps = _messages.IntegerField(5, variant=_messages.Variant.INT32)
randomizeSteps = _messages.BooleanField(6)
class AndroidRuntimeConfiguration(_messages.Message):
"""Configuration that can be selected at the time a test is run.
Fields:
locales: The set of available locales. @OutputOnly
orientations: The set of available orientations. @OutputOnly
"""
locales = _messages.MessageField('Locale', 1, repeated=True)
orientations = _messages.MessageField('Orientation', 2, repeated=True)
class AndroidVersion(_messages.Message):
"""A version of the Android OS
Fields:
apiLevel: The API level for this Android version. Examples: 18, 19
@OutputOnly
codeName: The code name for this Android version. Examples: "JellyBean",
"KitKat" @OutputOnly
distribution: Market share for this version. @OutputOnly
id: An opaque id for this Android version. Use this id to invoke the
TestExecutionService. @OutputOnly
releaseDate: The date this Android version became available in the market.
@OutputOnly
tags: Tags for this dimension. Examples: "default", "preview",
"deprecated"
versionString: A string representing this version of the Android OS.
Examples: "4.3", "4.4" @OutputOnly
"""
apiLevel = _messages.IntegerField(1, variant=_messages.Variant.INT32)
codeName = _messages.StringField(2)
distribution = _messages.MessageField('Distribution', 3)
id = _messages.StringField(4)
releaseDate = _messages.MessageField('Date', 5)
tags = _messages.StringField(6, repeated=True)
versionString = _messages.StringField(7)
class BlobstoreFile(_messages.Message):
"""Reference to a blob in Blobstore.
Fields:
blobId: A blob ID. Example: /android_test/blobs/4e9AAT9sqHRY_oBBzIKHSEFgg
md5Hash: The MD5 hash of the referenced blob. (This is necessary to create
a Bigstore object directly from the Blobstore reference.)
"""
blobId = _messages.StringField(1)
md5Hash = _messages.StringField(2)
class Browser(_messages.Message):
"""An available browser.
Fields:
androidCatalog: The catalog of Android devices for which we offer this
browser. @OutputOnly
id: A human readable id for this Browser version. Use this id to invoke
the TestExecutionService. Examples: "chrome-stable-channel", "firefox-
beta-channel" @OutputOnly
    linuxCatalog: The catalog of Linux machines for which we offer this browser.
@OutputOnly
name: A string representing the browser name. Examples: "chrome",
"firefox", "ie" @OutputOnly
release: The release of the browser. Examples: "stable-channel", "beta-
channel", "10" (for ie), etc @OutputOnly
versionString: A string representing the version of the browser. Examples:
"42.12.34.1234", "37.01", "10.0.9200.16384" (for ie) @OutputOnly
    windowsCatalog: The catalog of Windows machines for which we offer this
browser. @OutputOnly
"""
androidCatalog = _messages.MessageField('AndroidDeviceCatalog', 1)
id = _messages.StringField(2)
linuxCatalog = _messages.MessageField('LinuxMachineCatalog', 3)
name = _messages.StringField(4)
release = _messages.StringField(5)
versionString = _messages.StringField(6)
windowsCatalog = _messages.MessageField('WindowsMachineCatalog', 7)
class CancelTestMatrixResponse(_messages.Message):
"""Response containing the current state of the specified test matrix.
Enums:
TestStateValueValuesEnum: The current rolled-up state of the test matrix.
If this state is already final, then the cancelation request will have
no effect.
Fields:
testState: The current rolled-up state of the test matrix. If this state
is already final, then the cancelation request will have no effect.
"""
class TestStateValueValuesEnum(_messages.Enum):
"""The current rolled-up state of the test matrix. If this state is
already final, then the cancelation request will have no effect.
Values:
TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
VALIDATING: The execution or matrix is being validated.
PENDING: The execution or matrix is waiting for resources to become
available.
RUNNING: The execution is currently being processed. Can only be set on
an execution.
FINISHED: The execution or matrix has terminated normally. On a matrix
this means that the matrix level processing completed normally, but
individual executions may be in an ERROR state.
ERROR: The execution or matrix has stopped because it encountered an
infrastructure failure.
UNSUPPORTED_ENVIRONMENT: The execution was not run because it
      corresponds to an unsupported environment. Can only be set on an
execution.
INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
inputs are incompatible with the requested environment. Example:
requested AndroidVersion is lower than APK's minSdkVersion Can only
be set on an execution.
INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
provided inputs are incompatible with the requested architecture.
Example: requested device does not support running the native code in
the supplied APK Can only be set on an execution.
CANCELLED: The user cancelled the execution. Can only be set on an
execution.
INVALID: The execution or matrix was not run because the provided inputs
are not valid. Examples: input file is not of the expected type, is
malformed/corrupt, or was flagged as malware
"""
TEST_STATE_UNSPECIFIED = 0
VALIDATING = 1
PENDING = 2
RUNNING = 3
FINISHED = 4
ERROR = 5
UNSUPPORTED_ENVIRONMENT = 6
INCOMPATIBLE_ENVIRONMENT = 7
INCOMPATIBLE_ARCHITECTURE = 8
CANCELLED = 9
INVALID = 10
testState = _messages.EnumField('TestStateValueValuesEnum', 1)
class ClientInfo(_messages.Message):
"""Information about the client which invoked the test.
Fields:
name: Client name, such as gcloud.
"""
name = _messages.StringField(1)
class ConnectionInfo(_messages.Message):
"""Information needed to connect to services running on the virtual device.
The ssh_port is used to connect to the device, and then the adb_port and
vnc_port on the device can be forwarded to two local ports, to which adb and
vnc can connect, respectively. All of the fields in this message are
provided by the backend.
Fields:
adbPort: Port for ADB (e.g. 5555) NOT user-specified Required
ipAddress: IP address of the device. NOT user-specified Required
sshPort: Port for SSH (e.g. 22) NOT user-specified Required
vncPassword: Password for VNC NOT user-specified Required
vncPort: Port for VNC (e.g. 6444) NOT user-specified Required
"""
adbPort = _messages.IntegerField(1, variant=_messages.Variant.INT32)
ipAddress = _messages.StringField(2)
sshPort = _messages.IntegerField(3, variant=_messages.Variant.INT32)
vncPassword = _messages.StringField(4)
vncPort = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class Date(_messages.Message):
"""Represents a whole calendar date, e.g. date of birth. The time of day and
time zone are either specified elsewhere or are not significant. The date is
relative to the Proleptic Gregorian Calendar. The day may be 0 to represent
a year and month where the day is not significant, e.g. credit card
expiration date. The year may be 0 to represent a month and day independent
of year, e.g. anniversary date. Related types are google.type.TimeOfDay and
`google.protobuf.Timestamp`.
Fields:
day: Day of month. Must be from 1 to 31 and valid for the year and month,
or 0 if specifying a year/month where the day is not significant.
month: Month of year. Must be from 1 to 12.
year: Year of date. Must be from 1 to 9999, or 0 if specifying a date
without a year.
"""
day = _messages.IntegerField(1, variant=_messages.Variant.INT32)
month = _messages.IntegerField(2, variant=_messages.Variant.INT32)
year = _messages.IntegerField(3, variant=_messages.Variant.INT32)
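# Sketches of the two partial-date forms the docstring above allows: day=0
# for a year/month with no significant day (e.g. a credit card expiration),
# and year=0 for a recurring month/day such as an anniversary.
def _example_partial_dates():
  card_expiration = Date(year=2015, month=4, day=0)
  anniversary = Date(year=0, month=6, day=18)
  return card_expiration, anniversary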
class Device(_messages.Message):
"""A GCE virtual Android device instance.
Enums:
StateValueValuesEnum: State of the device. NOT user-specified
Fields:
androidDevice: The Android device configuration. User-specified Required
creationTime: The time the request to create this device was received. NOT
user-specified
deviceDetails: Information about the backing GCE instance and connection.
NOT user-specified
id: Unique id set by the backend. NOT user-specified
projectId: Project id set by the backend. NOT user-specified
state: State of the device. NOT user-specified
stateDetails: Details about the state of the device. NOT user-specified
"""
class StateValueValuesEnum(_messages.Enum):
"""State of the device. NOT user-specified
Values:
DEVICE_UNSPECIFIED: Do not use. For proto versioning only.
PREPARING: The device is in the process of spinning up.
READY: The device is created and ready to use.
CLOSED: The device has been closed.
DEVICE_ERROR: There has been an error.
"""
DEVICE_UNSPECIFIED = 0
PREPARING = 1
READY = 2
CLOSED = 3
DEVICE_ERROR = 4
androidDevice = _messages.MessageField('AndroidDevice', 1)
creationTime = _messages.StringField(2)
deviceDetails = _messages.MessageField('DeviceDetails', 3)
id = _messages.StringField(4)
projectId = _messages.StringField(5)
state = _messages.EnumField('StateValueValuesEnum', 6)
stateDetails = _messages.MessageField('DeviceStateDetails', 7)
class DeviceDetails(_messages.Message):
"""Details about the GCE instance and connection.
Fields:
connectionInfo: Details about the connection to the device.
gceInstanceDetails: Details about the GCE instance backing the device.
"""
connectionInfo = _messages.MessageField('ConnectionInfo', 1)
gceInstanceDetails = _messages.MessageField('GceInstanceDetails', 2)
class DeviceFile(_messages.Message):
"""A single device file description.
Fields:
obbFile: A ObbFile attribute.
regularFile: A RegularFile attribute.
"""
obbFile = _messages.MessageField('ObbFile', 1)
regularFile = _messages.MessageField('RegularFile', 2)
class DeviceStateDetails(_messages.Message):
"""Additional details about the status of the device.
Fields:
errorDetails: If the DeviceState is ERROR, then this string may contain
human-readable details about the error.
progressDetails: A human-readable, detailed description of the device's
status. For example: "Starting Device\n Device Ready". During the
device's lifespan data may be appended to the progress.
"""
errorDetails = _messages.StringField(1)
progressDetails = _messages.StringField(2)
class Distribution(_messages.Message):
"""Data about the relative number of devices running a given configuration
of the Android platform.
Fields:
marketShare: The estimated fraction (0-1) of the total market with this
configuration. @OutputOnly
measurementTime: The time this distribution was measured. @OutputOnly
"""
marketShare = _messages.FloatField(1)
measurementTime = _messages.StringField(2)
class Empty(_messages.Message):
"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class Environment(_messages.Message):
"""The environment in which the test is run.
Fields:
androidDevice: An Android device which must be used with an Android test.
"""
androidDevice = _messages.MessageField('AndroidDevice', 1)
class EnvironmentMatrix(_messages.Message):
"""The matrix of environments in which the test is to be executed.
Fields:
androidMatrix: A matrix of Android devices
"""
androidMatrix = _messages.MessageField('AndroidMatrix', 1)
class FileReference(_messages.Message):
"""A reference to a file, used for user inputs.
Fields:
blob: A blob in Blobstore.
gcsPath: A path to a file in Google Cloud Storage. Example: gs://build-
app-1414623860166/app-debug-unaligned.apk
"""
blob = _messages.MessageField('BlobstoreFile', 1)
gcsPath = _messages.StringField(2)
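# A FileReference is a oneof-style message: exactly one of `blob` or
# `gcsPath` should be populated. Sketch using the GCS form with the example
# path from the docstring above.
def _example_file_reference():
  return FileReference(
      gcsPath='gs://build-app-1414623860166/app-debug-unaligned.apk')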
class GceInstanceDetails(_messages.Message):
"""This information is provided for the user to look up additional details
  of the backing GCE instance. It is assumed the user does not modify this
  instance; if it is modified, the device service makes no guarantees about
  device functionality.
Fields:
name: Desired instance name of the device. May be user-specified. If not,
the backend will choose a name.
projectId: The GCE project that contains the instance backing this device.
If user-specified, must be the same as the project_id in the
CreateDeviceRequest.
    zone: Desired GCE zone for the device. User-specified
"""
name = _messages.StringField(1)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class GoogleCloudStorage(_messages.Message):
"""A storage location within Google cloud storage (GCS).
Fields:
gcsPath: The path to a directory in GCS that will eventually contain the
results for this test. The requesting user must have write access on the
bucket in the supplied path.
"""
gcsPath = _messages.StringField(1)
class LinuxMachine(_messages.Message):
"""A single Linux machine.
Fields:
versionId: The version id of the Linux OS to be used. Use the
EnvironmentDiscoveryService to get supported options.
"""
versionId = _messages.StringField(1)
class LinuxMachineCatalog(_messages.Message):
"""The currently supported Linux machines.
Fields:
versions: The set of supported Linux versions. @OutputOnly
"""
versions = _messages.MessageField('LinuxVersion', 1, repeated=True)
class LinuxVersion(_messages.Message):
"""A verison of a Linux OS.
Fields:
id: The unique opaque id for this Linux Version. @OutputOnly
tags: Tags for this version. Examples: "default"
versionString: A string representing this version of the Linux OS.
Examples: "debian-7-wheezy-v20150325", "debian-7-wheezy-v30150325"
@OutputOnly
"""
id = _messages.StringField(1)
tags = _messages.StringField(2, repeated=True)
versionString = _messages.StringField(3)
class ListDevicesResponse(_messages.Message):
"""Response containing a list of devices. Supports pagination.
Fields:
devices: The GCE virtual Android devices to be returned.
nextPageToken: The pagination token to retrieve the next page of device
results.
"""
devices = _messages.MessageField('Device', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListTestMatricesResponse(_messages.Message):
"""Response contain a list of Test Matrices.
Fields:
testMatrices: The set of test matrices.
"""
testMatrices = _messages.MessageField('TestMatrix', 1, repeated=True)
class ListWebDriverResponse(_messages.Message):
"""Response containing a list of WebDriver environments. Supports
pagination.
Fields:
nextPageToken: The pagination token to retrieve the next page of WebDriver
results.
webdriverEnvironments: The WebDriver environments to be returned.
"""
nextPageToken = _messages.StringField(1)
webdriverEnvironments = _messages.MessageField('WebDriver', 2, repeated=True)
class Locale(_messages.Message):
"""A location/region designation for language.
Fields:
id: The id for this locale. Example: "en_US" @OutputOnly
name: A human-friendly name for this language/locale. Example: "English"
@OutputOnly
    region: A human-friendly string representing the region for this locale.
Example: "United States" Not present for every locale. @OutputOnly
tags: Tags for this dimension. Examples: "default"
"""
id = _messages.StringField(1)
name = _messages.StringField(2)
region = _messages.StringField(3)
tags = _messages.StringField(4, repeated=True)
class ObbFile(_messages.Message):
"""A ObbFile object.
Fields:
obb: Opaque Binary Blob (OBB) file(s) to install on the device
obbFileName: OBB file name which must conform to the format as specified
by Android e.g. [main|patch].0300110.com.example.android.obb which will
be installed into <shared-storage>/Android/obb/<package-name>/ on the
device
"""
obb = _messages.MessageField('FileReference', 1)
obbFileName = _messages.StringField(2)
class Orientation(_messages.Message):
"""Screen orientation of the device.
Fields:
id: The id for this orientation. Example: "portrait" @OutputOnly
name: A human-friendly name for this orientation. Example: "portrait"
@OutputOnly
tags: Tags for this dimension. Examples: "default"
"""
id = _messages.StringField(1)
name = _messages.StringField(2)
tags = _messages.StringField(3, repeated=True)
class RegularFile(_messages.Message):
"""A file or directory to install on the device before the test starts
Fields:
content: A FileReference attribute.
devicePath: Where to put the content on the device. Must be an absolute,
whitelisted path. If it exists, it will be replaced. The following
device-side directories and any of their subdirectories are whitelisted:
<p>${EXTERNAL_STORAGE}, or /sdcard</p> <p>${ANDROID_DATA}/local/tmp, or
/data/local/tmp</p> <p>Specifying a path outside of these directory
trees is invalid. <p> The paths /sdcard and /data will be made
available and treated as implicit path substitutions. E.g. if /sdcard on
a particular device does not map to external storage, the system will
replace it with the external storage path prefix for that device and
copy the file there. <p> It is strongly advised to use the <a href=
"http://developer.android.com/reference/android/os/Environment.html">
Environment API</a> in app and test code to access files on the device
in a portable way.
"""
content = _messages.MessageField('FileReference', 1)
devicePath = _messages.StringField(2)
class ResultStorage(_messages.Message):
"""Locations where the results of running the test are stored.
Fields:
googleCloudStorage: Required.
toolResultsExecution: The tool results execution that results are written
to. @OutputOnly
toolResultsHistory: The tool results history that contains the tool
results execution that results are written to. If not provided the
service will choose an appropriate value.
"""
googleCloudStorage = _messages.MessageField('GoogleCloudStorage', 1)
toolResultsExecution = _messages.MessageField('ToolResultsExecution', 2)
toolResultsHistory = _messages.MessageField('ToolResultsHistory', 3)
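# Sketch of the only part of ResultStorage a caller supplies: the GCS
# destination (bucket/path hypothetical; the requesting user needs write
# access, per the GoogleCloudStorage docs above). toolResultsExecution is
# output-only, and toolResultsHistory is optional.
def _example_result_storage():
  return ResultStorage(
      googleCloudStorage=GoogleCloudStorage(
          gcsPath='gs://my-results-bucket/matrix-results/'))  # hypothetical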
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class TestDetails(_messages.Message):
"""Additional details about the progress of the running test.
Fields:
errorMessage: If the TestState is ERROR, then this string will contain
human-readable details about the error. @OutputOnly
progressMessages: Human-readable, detailed descriptions of the test's
progress. For example: "Provisioning a device", "Starting Test". During
the course of execution new data may be appended to the end of
progress_messages. @OutputOnly
"""
errorMessage = _messages.StringField(1)
progressMessages = _messages.StringField(2, repeated=True)
class TestEnvironmentCatalog(_messages.Message):
"""A description of a test environment.
Fields:
androidDeviceCatalog: Android devices suitable for running Android
Instrumentation Tests.
webDriverCatalog: WebDriver environments suitable for running web tests.
"""
androidDeviceCatalog = _messages.MessageField('AndroidDeviceCatalog', 1)
webDriverCatalog = _messages.MessageField('WebDriverCatalog', 2)
class TestExecution(_messages.Message):
"""Specifies a single test to be executed in a single environment.
Enums:
StateValueValuesEnum: Indicates the current progress of the test execution
(e.g., FINISHED). @OutputOnly
Fields:
environment: How the host machine(s) are configured. @OutputOnly
id: Unique id set by the backend. @OutputOnly
matrixId: Id of the containing TestMatrix. @OutputOnly
projectId: The cloud project that owns the test execution. @OutputOnly
state: Indicates the current progress of the test execution (e.g.,
FINISHED). @OutputOnly
testDetails: Additional details about the running test. @OutputOnly
testSpecification: How to run the test. @OutputOnly
timestamp: The time this test execution was initially created. @OutputOnly
toolResultsStep: Where the results for this execution are written.
@OutputOnly
"""
class StateValueValuesEnum(_messages.Enum):
"""Indicates the current progress of the test execution (e.g., FINISHED).
@OutputOnly
Values:
TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
VALIDATING: The execution or matrix is being validated.
PENDING: The execution or matrix is waiting for resources to become
available.
RUNNING: The execution is currently being processed. Can only be set on
an execution.
FINISHED: The execution or matrix has terminated normally. On a matrix
this means that the matrix level processing completed normally, but
individual executions may be in an ERROR state.
ERROR: The execution or matrix has stopped because it encountered an
infrastructure failure.
UNSUPPORTED_ENVIRONMENT: The execution was not run because it
        corresponds to an unsupported environment. Can only be set on an
execution.
INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
inputs are incompatible with the requested environment. Example:
requested AndroidVersion is lower than APK's minSdkVersion Can only
be set on an execution.
INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
provided inputs are incompatible with the requested architecture.
Example: requested device does not support running the native code in
the supplied APK Can only be set on an execution.
CANCELLED: The user cancelled the execution. Can only be set on an
execution.
INVALID: The execution or matrix was not run because the provided inputs
are not valid. Examples: input file is not of the expected type, is
malformed/corrupt, or was flagged as malware
"""
TEST_STATE_UNSPECIFIED = 0
VALIDATING = 1
PENDING = 2
RUNNING = 3
FINISHED = 4
ERROR = 5
UNSUPPORTED_ENVIRONMENT = 6
INCOMPATIBLE_ENVIRONMENT = 7
INCOMPATIBLE_ARCHITECTURE = 8
CANCELLED = 9
INVALID = 10
environment = _messages.MessageField('Environment', 1)
id = _messages.StringField(2)
matrixId = _messages.StringField(3)
projectId = _messages.StringField(4)
state = _messages.EnumField('StateValueValuesEnum', 5)
testDetails = _messages.MessageField('TestDetails', 6)
testSpecification = _messages.MessageField('TestSpecification', 7)
timestamp = _messages.StringField(8)
toolResultsStep = _messages.MessageField('ToolResultsStep', 9)
class TestMatrix(_messages.Message):
"""A group of one or more TestExecutions, built by taking a product of
values over a pre-defined set of axes.
Enums:
StateValueValuesEnum: Indicates the current progress of the test matrix
(e.g., FINISHED) @OutputOnly
Fields:
clientInfo: Information about the client which invoked the test.
environmentMatrix: How the host machine(s) are configured. Required
projectId: The cloud project that owns the test matrix. @OutputOnly
resultStorage: Where the results for the matrix are written. Required
state: Indicates the current progress of the test matrix (e.g., FINISHED)
@OutputOnly
testExecutions: The list of test executions that the service creates for
this matrix. @OutputOnly
testMatrixId: Unique id set by the service. @OutputOnly
testSpecification: How to run the test. Required
timestamp: The time this test matrix was initially created. @OutputOnly
"""
class StateValueValuesEnum(_messages.Enum):
"""Indicates the current progress of the test matrix (e.g., FINISHED)
@OutputOnly
Values:
TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
VALIDATING: The execution or matrix is being validated.
PENDING: The execution or matrix is waiting for resources to become
available.
RUNNING: The execution is currently being processed. Can only be set on
an execution.
FINISHED: The execution or matrix has terminated normally. On a matrix
this means that the matrix level processing completed normally, but
individual executions may be in an ERROR state.
ERROR: The execution or matrix has stopped because it encountered an
infrastructure failure.
UNSUPPORTED_ENVIRONMENT: The execution was not run because it
        corresponds to an unsupported environment. Can only be set on an
execution.
INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
inputs are incompatible with the requested environment. Example:
requested AndroidVersion is lower than APK's minSdkVersion Can only
be set on an execution.
INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
provided inputs are incompatible with the requested architecture.
Example: requested device does not support running the native code in
the supplied APK Can only be set on an execution.
CANCELLED: The user cancelled the execution. Can only be set on an
execution.
INVALID: The execution or matrix was not run because the provided inputs
are not valid. Examples: input file is not of the expected type, is
malformed/corrupt, or was flagged as malware
"""
TEST_STATE_UNSPECIFIED = 0
VALIDATING = 1
PENDING = 2
RUNNING = 3
FINISHED = 4
ERROR = 5
UNSUPPORTED_ENVIRONMENT = 6
INCOMPATIBLE_ENVIRONMENT = 7
INCOMPATIBLE_ARCHITECTURE = 8
CANCELLED = 9
INVALID = 10
clientInfo = _messages.MessageField('ClientInfo', 1)
environmentMatrix = _messages.MessageField('EnvironmentMatrix', 2)
projectId = _messages.StringField(3)
resultStorage = _messages.MessageField('ResultStorage', 4)
state = _messages.EnumField('StateValueValuesEnum', 5)
testExecutions = _messages.MessageField('TestExecution', 6, repeated=True)
testMatrixId = _messages.StringField(7)
testSpecification = _messages.MessageField('TestSpecification', 8)
timestamp = _messages.StringField(9)
class TestSetup(_messages.Message):
"""A description of how to set up the device prior to running the test
Fields:
directoriesToPull: The directories on the device to upload to GCS at the
end of the test; they must be absolute, whitelisted paths. Refer to
RegularFile for whitelisted paths.
filesToPush: A DeviceFile attribute.
"""
directoriesToPull = _messages.StringField(1, repeated=True)
filesToPush = _messages.MessageField('DeviceFile', 2, repeated=True)
class TestSpecification(_messages.Message):
"""A description of how to run the test.
Fields:
androidInstrumentationTest: An Android instrumentation test.
androidMonkeyTest: An Android monkey test.
androidRoboTest: An Android robo test.
autoGoogleLogin: Enables automatic Google account login. If set, the
service will automatically generate a Google test account and use it to
log into the device, before executing the test. Note that test accounts
might be reused. Many applications can be tested more effectively in the
context of such an account. Default is false. Optional
testSetup: Test setup requirements e.g. files to install, bootstrap
scripts
testTimeout: Max time a test execution is allowed to run before it is
automatically cancelled.
"""
androidInstrumentationTest = _messages.MessageField('AndroidInstrumentationTest', 1)
androidMonkeyTest = _messages.MessageField('AndroidMonkeyTest', 2)
androidRoboTest = _messages.MessageField('AndroidRoboTest', 3)
autoGoogleLogin = _messages.BooleanField(4)
testSetup = _messages.MessageField('TestSetup', 5)
testTimeout = _messages.StringField(6)
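# Sketch of a TestSpecification: exactly one test type is set, here a robo
# test over a hypothetical APK. The testTimeout duration-string format
# ("900s") is an assumption, not documented above.
def _example_test_specification():
  return TestSpecification(
      androidRoboTest=AndroidRoboTest(
          appApk=FileReference(gcsPath='gs://my-bucket/app-debug.apk'),  # hypothetical
          maxDepth=50),
      autoGoogleLogin=False,
      testTimeout='900s')  # assumed duration format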
class TestingProjectsDevicesCreateRequest(_messages.Message):
"""A TestingProjectsDevicesCreateRequest object.
Fields:
device: A Device resource to be passed as the request body.
projectId: The GCE project under which to create the device.
sshPublicKey: The public key to be set on the device in order to SSH into
it.
"""
device = _messages.MessageField('Device', 1)
projectId = _messages.StringField(2, required=True)
sshPublicKey = _messages.StringField(3)
class TestingProjectsDevicesDeleteRequest(_messages.Message):
"""A TestingProjectsDevicesDeleteRequest object.
Fields:
deviceId: The GCE virtual Android device to be deleted.
projectId: The GCE project that contains the device to be deleted.
"""
deviceId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class TestingProjectsDevicesGetRequest(_messages.Message):
"""A TestingProjectsDevicesGetRequest object.
Fields:
deviceId: The id of the GCE Android virtual device.
projectId: The GCE project that contains this device instance.
"""
deviceId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class TestingProjectsDevicesKeepaliveRequest(_messages.Message):
"""A TestingProjectsDevicesKeepaliveRequest object.
Fields:
deviceId: The GCE virtual Android device to be issued the keep-alive.
projectId: The GCE project that contains the device to be issued the keep-
alive.
"""
deviceId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class TestingProjectsDevicesListRequest(_messages.Message):
"""A TestingProjectsDevicesListRequest object.
Fields:
pageSize: Used to specify the max number of device results to be returned.
pageToken: Used to request a specific page of the device results list.
projectId: The GCE project to list the devices from.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
class TestingProjectsTestMatricesCancelRequest(_messages.Message):
"""A TestingProjectsTestMatricesCancelRequest object.
Fields:
projectId: Cloud project that owns the test.
testMatrixId: Test matrix that will be canceled.
"""
projectId = _messages.StringField(1, required=True)
testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesCreateRequest(_messages.Message):
"""A TestingProjectsTestMatricesCreateRequest object.
Fields:
projectId: The GCE project under which this job will run.
requestId: A string id used to detect duplicated requests. Ids are
automatically scoped to a project, so users should ensure the ID is
unique per-project. A UUID is recommended. Optional, but strongly
recommended.
testMatrix: A TestMatrix resource to be passed as the request body.
"""
projectId = _messages.StringField(1, required=True)
requestId = _messages.StringField(2)
testMatrix = _messages.MessageField('TestMatrix', 3)
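# End-to-end sketch tying the pieces above together: a create request wraps
# a TestMatrix whose required fields are the environment matrix, result
# storage, and test specification. The project id and request id (a UUID,
# per the field docs) are hypothetical.
def _example_create_test_matrix_request():
  matrix = TestMatrix(
      environmentMatrix=EnvironmentMatrix(
          androidMatrix=_example_android_matrix()),
      resultStorage=_example_result_storage(),
      testSpecification=_example_test_specification())
  return TestingProjectsTestMatricesCreateRequest(
      projectId='my-gcp-project',                        # hypothetical
      requestId='123e4567-e89b-12d3-a456-426655440000',  # hypothetical UUID
      testMatrix=matrix)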
class TestingProjectsTestMatricesDeleteRequest(_messages.Message):
"""A TestingProjectsTestMatricesDeleteRequest object.
Fields:
projectId: Cloud project that owns the test.
    testMatrixId: Test matrix that will be deleted.
"""
projectId = _messages.StringField(1, required=True)
testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesGetRequest(_messages.Message):
"""A TestingProjectsTestMatricesGetRequest object.
Fields:
projectId: Cloud project that owns the test matrix.
testMatrixId: Unique test matrix id which was assigned by the service.
"""
projectId = _messages.StringField(1, required=True)
testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesListRequest(_messages.Message):
"""A TestingProjectsTestMatricesListRequest object.
Fields:
projectId: Cloud project that owns the tests.
"""
projectId = _messages.StringField(1, required=True)
class TestingProjectsWebdriverCreateRequest(_messages.Message):
"""A TestingProjectsWebdriverCreateRequest object.
Fields:
projectId: The GCP project under which to create the WebDriver
environment.
webDriver: A WebDriver resource to be passed as the request body.
"""
projectId = _messages.StringField(1, required=True)
webDriver = _messages.MessageField('WebDriver', 2)
class TestingProjectsWebdriverDeleteRequest(_messages.Message):
"""A TestingProjectsWebdriverDeleteRequest object.
Fields:
projectId: The GCP project that contains the WebDriver endpoint to be
deleted.
    webdriverId: The GCE WebDriver environment to be deleted, specified by
      its WebDriver id.
"""
projectId = _messages.StringField(1, required=True)
webdriverId = _messages.StringField(2, required=True)
class TestingProjectsWebdriverGetRequest(_messages.Message):
"""A TestingProjectsWebdriverGetRequest object.
Fields:
projectId: The GCP project that contains this WebDriver instance.
    webdriverId: The GCE WebDriver environment to be retrieved, specified by
      its WebDriver id.
"""
projectId = _messages.StringField(1, required=True)
webdriverId = _messages.StringField(2, required=True)
class TestingProjectsWebdriverKeepaliveRequest(_messages.Message):
"""A TestingProjectsWebdriverKeepaliveRequest object.
Fields:
projectId: The GCP project that contains the webdriver to be issued the
keep-alive.
webDriverKeepAliveRequest: A WebDriverKeepAliveRequest resource to be
passed as the request body.
webdriverId: The WebDriver environment to be issued the keep-alive.
"""
projectId = _messages.StringField(1, required=True)
webDriverKeepAliveRequest = _messages.MessageField('WebDriverKeepAliveRequest', 2)
webdriverId = _messages.StringField(3, required=True)
class TestingProjectsWebdriverListRequest(_messages.Message):
"""A TestingProjectsWebdriverListRequest object.
Fields:
pageSize: Used to specify the max number of results to be returned.
pageToken: Used to request a specific page of the results list.
projectId: The GCP project to list the environments from.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
class TestingTestEnvironmentCatalogGetRequest(_messages.Message):
"""A TestingTestEnvironmentCatalogGetRequest object.
Enums:
EnvironmentTypeValueValuesEnum: The type of environment that should be
listed.
Fields:
environmentType: The type of environment that should be listed.
"""
class EnvironmentTypeValueValuesEnum(_messages.Enum):
"""The type of environment that should be listed.
Values:
ENVIRONMENT_TYPE_UNSPECIFIED: <no description>
ANDROID: <no description>
WEBDRIVER: <no description>
"""
ENVIRONMENT_TYPE_UNSPECIFIED = 0
ANDROID = 1
WEBDRIVER = 2
environmentType = _messages.EnumField('EnvironmentTypeValueValuesEnum', 1, required=True)
class ToolResultsExecution(_messages.Message):
"""Represents a tool results execution resource. This has the results of a
TestMatrix.
Fields:
executionId: A tool results execution ID. @OutputOnly
historyId: A tool results history ID. @OutputOnly
projectId: The cloud project that owns the tool results execution.
@OutputOnly
"""
executionId = _messages.StringField(1)
historyId = _messages.StringField(2)
projectId = _messages.StringField(3)
class ToolResultsHistory(_messages.Message):
"""Represents a tool results history resource.
Fields:
historyId: A tool results history ID.
projectId: The cloud project that owns the tool results history.
"""
historyId = _messages.StringField(1)
projectId = _messages.StringField(2)
class ToolResultsStep(_messages.Message):
"""Represents a tool results step resource. This has the results of a
TestExecution.
Fields:
executionId: A tool results execution ID. @OutputOnly
historyId: A tool results history ID. @OutputOnly
projectId: The cloud project that owns the tool results step. @OutputOnly
stepId: A tool results step ID. @OutputOnly
"""
executionId = _messages.StringField(1)
historyId = _messages.StringField(2)
projectId = _messages.StringField(3)
stepId = _messages.StringField(4)
class VMDetails(_messages.Message):
"""A VMDetails object.
Enums:
StateValueValuesEnum: State of the device. @OutputOnly
Fields:
creationTime: The time this device was initially created. @OutputOnly
deviceDetails: Information about the backing GCE instance and connection.
@OutputOnly
state: State of the device. @OutputOnly
stateDetails: Details about the state of the device. @OutputOnly
"""
class StateValueValuesEnum(_messages.Enum):
"""State of the device. @OutputOnly
Values:
DEVICE_UNSPECIFIED: Do not use. For proto versioning only.
PREPARING: The device is in the process of spinning up.
READY: The device is created and ready to use.
CLOSED: The device has been closed.
DEVICE_ERROR: There has been an error.
"""
DEVICE_UNSPECIFIED = 0
PREPARING = 1
READY = 2
CLOSED = 3
DEVICE_ERROR = 4
creationTime = _messages.StringField(1)
deviceDetails = _messages.MessageField('DeviceDetails', 2)
state = _messages.EnumField('StateValueValuesEnum', 3)
stateDetails = _messages.MessageField('DeviceStateDetails', 4)
class WebDriver(_messages.Message):
"""A WebDriver environment.
Fields:
androidDevice: An Android device.
browserId: The id of the browser to be used. Use the
EnvironmentDiscoveryService to get supported values. Required
endpoint: The endpoint in host:port format where the target running the
specified browser accepts WebDriver protocol commands. @OutputOnly
id: Unique id set by the system. @OutputOnly
linuxMachine: A Linux virtual machine.
projectId: The GCE project for this WebDriver test environment.
@OutputOnly
sshPublicKey: The public key to be set on the VM in order to SSH into it.
vmDetails: The state details of the target device/machine. @OutputOnly
windowsMachine: A Windows virtual machine.
"""
androidDevice = _messages.MessageField('AndroidDevice', 1)
browserId = _messages.StringField(2)
endpoint = _messages.StringField(3)
id = _messages.StringField(4)
linuxMachine = _messages.MessageField('LinuxMachine', 5)
projectId = _messages.StringField(6)
sshPublicKey = _messages.StringField(7)
vmDetails = _messages.MessageField('VMDetails', 8)
windowsMachine = _messages.MessageField('WindowsMachine', 9)
class WebDriverCatalog(_messages.Message):
"""The currently supported WebDriver VM resources.
Fields:
browsers: The set of supported browsers. @OutputOnly
"""
browsers = _messages.MessageField('Browser', 1, repeated=True)
class WebDriverKeepAliveRequest(_messages.Message):
"""Request to issue a keep-alive to a WebDriver environment instance by
project and webdriver ids.
"""
class WindowsMachine(_messages.Message):
"""A single Windows machine.
Fields:
versionId: The version id of the Windows OS to be used. Use the
EnvironmentDiscoveryService to get supported options.
"""
versionId = _messages.StringField(1)
class WindowsMachineCatalog(_messages.Message):
"""The currently supported Windows machines.
Fields:
versions: The set of supported Windows versions. @OutputOnly
"""
versions = _messages.MessageField('WindowsVersion', 1, repeated=True)
class WindowsVersion(_messages.Message):
"""A version of a Windows OS.
Fields:
id: The unique opaque id for this Windows Version. @OutputOnly
tags: Tags for this version. Examples: "default"
versionString: A string representing this version of the Windows OS.
Examples: "windows-server-2008-r2-dc-v20150331", windows-7" @OutputOnly
"""
id = _messages.StringField(1)
tags = _messages.StringField(2, repeated=True)
versionString = _messages.StringField(3)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'testing')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'testing')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'testing')
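# Sketch of what the mappings above accomplish: when a
# StandardQueryParameters message is serialized, the Python-safe names
# f__xgafv/_1/_2 appear in JSON under their wire forms '$.xgafv'/'1'/'2'.
# MessageToJson is assumed to be available on the `encoding` module this
# file already uses above.
def _example_xgafv_serialization():
  params = StandardQueryParameters(
      f__xgafv=StandardQueryParameters.FXgafvValueValuesEnum._2)
  return encoding.MessageToJson(params)  # expected to include "$.xgafv": "2"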
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/testing/v1/testing_v1_messages.py
|
Python
|
bsd-3-clause
| 53,170
|
[
"Galaxy"
] |
9b14cfb3864f66f1cf0fb75eb059f42264f8c3819d7badcd5a315fc1ed85aa66
|
# -*- coding: utf-8 -*-
"""
Acceptance tests for Video.
"""
import os
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipIf, skip
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from ..helpers import UniqueCourseTest, is_youtube_available, YouTubeStubConfig
from ...pages.lms.video.video import VideoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import skip_if_browser
from flaky import flaky
VIDEO_SOURCE_PORT = 8777
HTML5_SOURCES = [
'http://localhost:{0}/gizmo.mp4'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.webm'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.ogv'.format(VIDEO_SOURCE_PORT),
]
HTML5_SOURCES_INCORRECT = [
'http://localhost:{0}/gizmo.mp99'.format(VIDEO_SOURCE_PORT),
]
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class VideoBaseTest(UniqueCourseTest):
"""
Base class for tests of the Video Player
Sets up the course and provides helper functions for the Video tests.
"""
def setUp(self):
"""
Initialization of pages and course fixture for video tests
"""
super(VideoBaseTest, self).setUp()
self.video = VideoPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.course_nav = CourseNavPage(self.browser)
self.courseware = CoursewarePage(self.browser, self.course_id)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.metadata = None
self.assets = []
self.contents_of_verticals = None
self.youtube_configuration = {}
self.user_info = {}
# reset youtube stub server
self.addCleanup(YouTubeStubConfig.reset)
def navigate_to_video(self):
""" Prepare the course and get to the video and render it """
self._install_course_fixture()
self._navigate_to_courseware_video_and_render()
def navigate_to_video_no_render(self):
"""
Prepare the course and get to the video unit;
however, do not wait for it to render, because
there has been an error.
"""
self._install_course_fixture()
self._navigate_to_courseware_video_no_render()
def _install_course_fixture(self):
""" Install the course fixture that has been defined """
if self.assets:
self.course_fixture.add_asset(self.assets)
chapter_sequential = XBlockFixtureDesc('sequential', 'Test Section')
chapter_sequential.add_children(*self._add_course_verticals())
chapter = XBlockFixtureDesc('chapter', 'Test Chapter').add_children(chapter_sequential)
self.course_fixture.add_children(chapter)
self.course_fixture.install()
if len(self.youtube_configuration) > 0:
YouTubeStubConfig.configure(self.youtube_configuration)
def _add_course_verticals(self):
"""
Create XBlockFixtureDesc verticals
:return: a list of XBlockFixtureDesc
"""
xblock_verticals = []
_contents_of_verticals = self.contents_of_verticals
# Video tests require at least one vertical with a single video.
if not _contents_of_verticals:
_contents_of_verticals = [[{'display_name': 'Video', 'metadata': self.metadata}]]
for vertical_index, vertical in enumerate(_contents_of_verticals):
xblock_verticals.append(self._create_single_vertical(vertical, vertical_index))
return xblock_verticals
def _create_single_vertical(self, vertical_contents, vertical_index):
"""
Create a single course vertical of type XBlockFixtureDesc with category `vertical`.
A single course vertical can contain single or multiple video modules.
:param vertical_contents: a list of items for the vertical to contain
:param vertical_index: index for the vertical display name
:return: XBlockFixtureDesc
"""
xblock_course_vertical = XBlockFixtureDesc('vertical', 'Test Vertical-{0}'.format(vertical_index))
for video in vertical_contents:
xblock_course_vertical.add_children(
XBlockFixtureDesc('video', video['display_name'], metadata=video.get('metadata')))
return xblock_course_vertical
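# A sketch (display names hypothetical) of the shape _add_course_verticals
# consumes: a test that wants two verticals, the second holding two video
# modules, would set
#
#   self.contents_of_verticals = [
#       [{'display_name': 'Video A', 'metadata': self.metadata}],
#       [{'display_name': 'Video B', 'metadata': self.metadata},
#        {'display_name': 'Video C', 'metadata': None}],
#   ]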
def _navigate_to_courseware_video(self):
""" Register for the course and navigate to the video unit """
self.auth_page.visit()
self.user_info = self.auth_page.user_info
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
def _navigate_to_courseware_video_and_render(self):
""" Wait for the video player to render """
self._navigate_to_courseware_video()
self.video.wait_for_video_player_render()
def _navigate_to_courseware_video_no_render(self):
""" Wait for the video Xmodule but not for rendering """
self._navigate_to_courseware_video()
self.video.wait_for_video_class()
def metadata_for_mode(self, player_mode, additional_data=None):
"""
Create a dictionary for video player configuration according to `player_mode`
:param player_mode (str): Video player mode
:param additional_data (dict): Optional additional metadata.
:return: dict
"""
metadata = {}
if player_mode == 'html5':
metadata.update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES
})
if player_mode == 'youtube_html5':
metadata.update({
'html5_sources': HTML5_SOURCES,
})
if player_mode == 'youtube_html5_unsupported_video':
metadata.update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'html5_unsupported_video':
metadata.update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES_INCORRECT
})
if additional_data:
metadata.update(additional_data)
return metadata
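# For example, a YouTube player with an HTML5 fallback and a downloadable
# transcript track (as exercised by the tests below) would be configured as:
#
#   self.metadata = self.metadata_for_mode(
#       'youtube_html5', additional_data={'download_track': True})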
def go_to_sequential_position(self, position):
"""
Navigate to the sequential at the given `position` and wait for the video player to render
"""
self.courseware.go_to_sequential_position(position)
self.video.wait_for_video_player_render()
@attr('shard_4')
class YouTubeVideoTest(VideoBaseTest):
""" Test YouTube Video Player """
def setUp(self):
super(YouTubeVideoTest, self).setUp()
def test_youtube_video_rendering_wo_html5_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
"""
self.navigate_to_video()
# Verify that video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_transcript_button_wo_english_transcript(self):
"""
Scenario: Transcript button works correctly w/o english transcript in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.append('chinese_transcripts.srt')
self.navigate_to_video()
self.video.show_captions()
# Verify that we see "好 各位同学" text in the transcript
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_cc_button(self):
"""
Scenario: CC button works correctly with transcript in YouTube mode
Given the course has a video component in "Youtube" mode
And I have defined a transcript for the video
Then I see the closed captioning element over the video
"""
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.append('chinese_transcripts.srt')
self.navigate_to_video()
# Show captions and make sure they're visible and cookie is set
self.video.show_closed_captions()
self.video.wait_for_closed_captions()
self.assertTrue(self.video.is_closed_captions_visible)
self.video.reload_page()
self.assertTrue(self.video.is_closed_captions_visible)
# Hide captions and make sure they're hidden and cookie is unset
self.video.hide_closed_captions()
self.video.wait_for_closed_captions_to_be_hidden()
self.video.reload_page()
self.video.wait_for_closed_captions_to_be_hidden()
def test_transcript_button_transcripts_and_sub_fields_empty(self):
"""
Scenario: Transcript button works correctly if transcripts and sub fields are empty,
but transcript file exists in assets (Youtube mode of Video component)
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
Then I see the correct english text in the captions
"""
self._install_course_fixture()
self.course_fixture.add_asset(['subs_3_yD_cEKoCk.srt.sjson'])
self.course_fixture._upload_assets()
self._navigate_to_courseware_video_and_render()
self.video.show_captions()
# Verify that we see "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
def test_transcript_button_hidden_no_translations(self):
"""
Scenario: Transcript button is hidden if no translations
Given the course has a Video component in "Youtube" mode
Then the "Transcript" button is hidden
"""
self.navigate_to_video()
self.assertFalse(self.video.is_button_shown('transcript_button'))
def test_fullscreen_video_alignment_with_transcript_hidden(self):
"""
Scenario: Video is aligned with transcript hidden in fullscreen mode
Given the course has a Video component in "Youtube" mode
When I view the video at fullscreen
Then the video with the transcript hidden is aligned correctly
"""
self.navigate_to_video()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in YouTube mode
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I can download the transcript in "srt" format
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_fullscreen_video_alignment_on_transcript_toggle(self):
"""
Scenario: Video is aligned correctly on transcript toggle in fullscreen mode
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
And the video with the transcript hidden is aligned correctly
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
# click video button "transcript"
self.video.click_player_button('transcript_button')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_video_rendering_with_default_response_time(self):
"""
Scenario: Video is rendered in Youtube mode when the YouTube Server responds quickly
Given the YouTube server response time is less than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "Youtube" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 0.4
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_video_rendering_wo_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 when the YouTube Server responds slowly
Given the YouTube server response time is greater than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 2.0
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
def test_video_with_youtube_blocked_with_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
And only one video has rendered
"""
# configure youtube server
self.youtube_configuration.update({
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# The video should only be loaded once
self.assertEqual(len(self.video.q(css='video')), 1)
def test_video_with_youtube_blocked_delayed_response_time(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube server response time is greater than 1.5 seconds
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
And only one video has rendered
"""
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# The video should only be loaded once
self.assertEqual(len(self.video.q(css='video')), 1)
def test_html5_video_rendered_with_youtube_captions(self):
"""
        Scenario: User should see Youtube captions if there are no transcripts
        available for HTML5 mode
Given that I have uploaded a .srt.sjson file to assets for Youtube mode
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
And Video component rendered in HTML5 mode
And Html5 mode video has no transcripts
When I see the captions for HTML5 mode video
Then I should see the Youtube captions
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube_html5', additional_data=data)
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# check if caption button is visible
self.assertTrue(self.video.is_button_shown('transcript_button'))
self._verify_caption_text('Welcome to edX.')
def test_download_transcript_button_works_correctly(self):
"""
Scenario: Download Transcript button works correctly
Given the course has Video components A and B in "Youtube" mode
And Video component C in "HTML5" mode
And I have defined downloadable transcripts for the videos
Then I can download a transcript for Video A in "srt" format
And I can download a transcript for Video A in "txt" format
And I can download a transcript for Video B in "txt" format
And the Download Transcript menu does not exist for Video C
"""
data_a = {'sub': '3_yD_cEKoCk', 'download_track': True}
youtube_a_metadata = self.metadata_for_mode('youtube', additional_data=data_a)
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data_b = {'youtube_id_1_0': 'b7xgknqkQk8', 'sub': 'b7xgknqkQk8', 'download_track': True}
youtube_b_metadata = self.metadata_for_mode('youtube', additional_data=data_b)
self.assets.append('subs_b7xgknqkQk8.srt.sjson')
data_c = {'track': 'http://example.org/', 'download_track': True}
html5_c_metadata = self.metadata_for_mode('html5', additional_data=data_c)
self.contents_of_verticals = [
[{'display_name': 'A', 'metadata': youtube_a_metadata}],
[{'display_name': 'B', 'metadata': youtube_b_metadata}],
[{'display_name': 'C', 'metadata': html5_c_metadata}]
]
# open the section with videos (open vertical containing video "A")
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "00:00:00,260"
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', '00:00:00,260'))
# select the transcript format "txt"
self.assertTrue(self.video.select_transcript_format('txt'))
# check if we can download transcript in "txt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Welcome to edX.'))
# open vertical containing video "B"
self.course_nav.go_to_vertical('Test Vertical-1')
# check if we can download transcript in "txt" format that has text "Equal transcripts"
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Equal transcripts'))
# open vertical containing video "C"
self.course_nav.go_to_vertical('Test Vertical-2')
# menu "download_transcript" doesn't exist
self.assertFalse(self.video.is_menu_present('download_transcript'))
def _verify_caption_text(self, text):
self.video._wait_for(
lambda: (text in self.video.captions_text),
u'Captions contain "{}" text'.format(text),
timeout=5
)
def _verify_closed_caption_text(self, text):
"""
        Wait until the given text appears in the closed captions.
"""
self.video.wait_for(
lambda: (text in self.video.closed_captions_text),
u'Closed captions contain "{}" text'.format(text),
timeout=5
)
def test_video_language_menu_working(self):
"""
Scenario: Language menu works correctly in Video component
Given the course has a Video component in "Youtube" mode
And I have defined multiple language transcripts for the videos
And I make sure captions are closed
And I see video menu "language" with correct items
And I select language with code "zh"
Then I see "好 各位同学" text in the captions
And I select language with code "en"
Then I see "Welcome to edX." text in the captions
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.hide_captions()
correct_languages = {'en': 'English', 'zh': 'Chinese'}
self.assertEqual(self.video.caption_languages, correct_languages)
self.video.select_language('zh')
unicode_text = "好 各位同学".decode('utf-8')
self._verify_caption_text(unicode_text)
self.video.select_language('en')
self._verify_caption_text('Welcome to edX.')
def test_video_language_menu_working_closed_captions(self):
"""
Scenario: Language menu works correctly in Video component, checks closed captions
Given the course has a Video component in "Youtube" mode
And I have defined multiple language transcripts for the videos
And I make sure captions are closed
And I see video menu "language" with correct items
And I select language with code "en"
Then I see "Welcome to edX." text in the closed captions
And I select language with code "zh"
Then I see "我们今天要讲的题目是" text in the closed captions
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_closed_captions()
correct_languages = {'en': 'English', 'zh': 'Chinese'}
self.assertEqual(self.video.caption_languages, correct_languages)
# we start the video, then pause it to activate the transcript
self.video.click_player_button('play')
self.video.wait_for_position('0:03')
self.video.click_player_button('pause')
self.video.select_language('en')
self.video.click_first_line_in_transcript()
self._verify_closed_caption_text('Welcome to edX.')
self.video.select_language('zh')
unicode_text = "我们今天要讲的题目是".decode('utf-8')
self.video.click_first_line_in_transcript()
self._verify_closed_caption_text(unicode_text)
def test_multiple_videos_in_sequentials_load_and_work(self):
"""
Scenario: Multiple videos in sequentials all load and work, switching between sequentials
Given it has videos "A,B" in "Youtube" mode in position "1" of sequential
And videos "C,D" in "Youtube" mode in position "2" of sequential
"""
self.contents_of_verticals = [
[{'display_name': 'A'}, {'display_name': 'B'}],
[{'display_name': 'C'}, {'display_name': 'D'}]
]
tab1_video_names = ['A', 'B']
tab2_video_names = ['C', 'D']
def execute_video_steps(video_names):
"""
Execute video steps
"""
for video_name in video_names:
self.video.use_video(video_name)
self.video.click_player_button('play')
self.assertIn(self.video.state, ['playing', 'buffering'])
self.video.click_player_button('pause')
# go to video
self.navigate_to_video()
execute_video_steps(tab1_video_names)
# go to second sequential position
self.go_to_sequential_position(2)
execute_video_steps(tab2_video_names)
# go back to first sequential position
        # we are again playing tab 1 videos to ensure that switching didn't break any video functionality.
self.go_to_sequential_position(1)
execute_video_steps(tab1_video_names)
def test_video_component_stores_speed_correctly_for_multiple_videos(self):
"""
Scenario: Video component stores speed correctly when each video is in separate sequential
Given I have a video "A" in "Youtube" mode in position "1" of sequential
And a video "B" in "Youtube" mode in position "2" of sequential
And a video "C" in "HTML5" mode in position "3" of sequential
"""
# vertical titles are created in VideoBaseTest._create_single_vertical
# and are of the form Test Vertical-{_} where _ is the index in self.contents_of_verticals
self.contents_of_verticals = [
[{'display_name': 'A'}], [{'display_name': 'B'}],
[{'display_name': 'C', 'metadata': self.metadata_for_mode('html5')}]
]
self.navigate_to_video()
# select the "2.0" speed on video "A"
self.course_nav.go_to_vertical('Test Vertical-0')
self.video.wait_for_video_player_render()
self.video.speed = '2.0'
# select the "0.50" speed on video "B"
self.course_nav.go_to_vertical('Test Vertical-1')
self.video.wait_for_video_player_render()
self.video.speed = '0.50'
# open video "C"
self.course_nav.go_to_vertical('Test Vertical-2')
self.video.wait_for_video_player_render()
# Since the playback speed was set to .5 in "B", this video will also be impacted
# because a playback speed has never explicitly been set for it. However, this video
# does not have a .5 playback option, so the closest possible (.75) should be selected.
self.video.verify_speed_changed('0.75x')
# go to the vertical containing video "A"
self.course_nav.go_to_vertical('Test Vertical-0')
# Video "A" should still play at speed 2.0 because it was explicitly set to that.
self.assertEqual(self.video.speed, '2.0x')
# reload the page
self.video.reload_page()
# go to the vertical containing video "A"
self.course_nav.go_to_vertical('Test Vertical-0')
# check if video "A" should start playing at speed "2.0"
self.assertEqual(self.video.speed, '2.0x')
# select the "1.0" speed on video "A"
self.video.speed = '1.0'
# go to the vertical containing "B"
self.course_nav.go_to_vertical('Test Vertical-1')
# Video "B" should still play at speed .5 because it was explicitly set to that.
self.assertEqual(self.video.speed, '0.50x')
# go to the vertical containing video "C"
self.course_nav.go_to_vertical('Test Vertical-2')
# The change of speed for Video "A" should impact Video "C" because it still has
# not been explicitly set to a speed.
self.video.verify_speed_changed('1.0x')
def test_video_has_correct_transcript(self):
"""
Scenario: Youtube video has correct transcript if fields for other speeds are filled
Given it has a video in "Youtube" mode
And I have uploaded multiple transcripts
And I make sure captions are opened
Then I see "Welcome to edX." text in the captions
And I select the "1.50" speed
And I reload the page with video
Then I see "Welcome to edX." text in the captions
And I see duration "1:56"
"""
self.assets.extend(['subs_3_yD_cEKoCk.srt.sjson', 'subs_b7xgknqkQk8.srt.sjson'])
data = {'sub': '3_yD_cEKoCk', 'youtube_id_1_5': 'b7xgknqkQk8'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
self.assertIn('Welcome to edX.', self.video.captions_text)
self.video.speed = '1.50'
self.video.reload_page()
self.assertIn('Welcome to edX.', self.video.captions_text)
        self.assertEqual(self.video.duration, '1:56')
def test_video_position_stored_correctly_wo_seek(self):
"""
Scenario: Video component stores position correctly when page is reloaded
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
        And I click video button "play"
        Then I wait until video reaches position "0:03"
And I click video button "pause"
And I reload the page with video
        And I click video button "play"
        And I click video button "pause"
        Then video slider should be equal to or greater than "0:03"
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for_position('0:03')
self.video.click_player_button('pause')
self.video.reload_page()
self.video.click_player_button('play')
self.video.click_player_button('pause')
self.assertGreaterEqual(self.video.seconds, 3)
@skip("Intermittently fails 03 June 2014")
def test_video_position_stored_correctly_with_seek(self):
"""
Scenario: Video component stores position correctly when page is reloaded
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
        And I click video button "play"
And I click video button "pause"
Then I seek video to "0:10" position
        And I click video button "play"
And I click video button "pause"
And I reload the page with video
        Then video slider should be equal to or greater than "0:10"
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.seek('0:10')
self.video.click_player_button('pause')
self.video.reload_page()
self.video.click_player_button('play')
self.video.click_player_button('pause')
self.assertGreaterEqual(self.video.seconds, 10)
def test_simplified_and_traditional_chinese_transcripts(self):
"""
Scenario: Simplified and Traditional Chinese transcripts work as expected in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a Simplified Chinese transcript for the video
And I have defined a Traditional Chinese transcript for the video
Then I see the correct subtitle language options in cc menu
Then I see the correct text in the captions for Simplified and Traditional Chinese transcripts
And I can download the transcripts for Simplified and Traditional Chinese
And video subtitle menu has 'zh_HANS', 'zh_HANT' translations for 'Simplified Chinese'
and 'Traditional Chinese' respectively
"""
data = {
'download_track': True,
'transcripts': {'zh_HANS': 'simplified_chinese.srt', 'zh_HANT': 'traditional_chinese.srt'}
}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.extend(['simplified_chinese.srt', 'traditional_chinese.srt'])
self.navigate_to_video()
langs = {'zh_HANS': '在线学习是革', 'zh_HANT': '在線學習是革'}
for lang_code, text in langs.items():
self.assertTrue(self.video.select_language(lang_code))
unicode_text = text.decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
self.assertEqual(self.video.caption_languages, {'zh_HANS': 'Simplified Chinese', 'zh_HANT': 'Traditional Chinese'})
def test_video_bumper_render(self):
"""
Scenario: Multiple videos with bumper in sequentials all load and work, switching between sequentials
Given it has videos "A,B" in "Youtube" and "HTML5" modes in position "1" of sequential
And video "C" in "Youtube" mode in position "2" of sequential
When I open sequential position "1"
Then I see video "B" has a poster
When I click on it
Then I see video bumper is playing
When I skip the bumper
Then I see the main video
When I click on video "A"
Then the main video starts playing
When I open sequential position "2"
And click on the poster
Then the main video starts playing
Then I see that the main video starts playing once I go back to position "2" of sequential
When I reload the page
Then I see that the main video starts playing when I click on the poster
"""
additional_data = {
u'video_bumper': {
u'value': {
"transcripts": {},
"video_id": "video_001"
}
}
}
self.contents_of_verticals = [
[{'display_name': 'A'}, {'display_name': 'B', 'metadata': self.metadata_for_mode('html5')}],
[{'display_name': 'C'}]
]
tab1_video_names = ['A', 'B']
tab2_video_names = ['C']
def execute_video_steps(video_names):
"""
Execute video steps
"""
for video_name in video_names:
self.video.use_video(video_name)
self.assertTrue(self.video.is_poster_shown)
self.video.click_on_poster()
self.video.wait_for_video_player_render(autoplay=True)
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.course_fixture.add_advanced_settings(additional_data)
self.navigate_to_video_no_render()
self.video.use_video('B')
self.assertTrue(self.video.is_poster_shown)
self.video.click_on_poster()
self.video.wait_for_video_bumper_render()
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.video.click_player_button('skip_bumper')
        # no autoplay here; the video may be too small, so the pause state is not toggled
self.video.wait_for_video_player_render()
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.video.use_video('A')
execute_video_steps(['A'])
# go to second sequential position
self.courseware.go_to_sequential_position(2)
execute_video_steps(tab2_video_names)
# go back to first sequential position
        # we are again playing tab 1 videos to ensure that switching didn't break any video functionality.
self.courseware.go_to_sequential_position(1)
execute_video_steps(tab1_video_names)
self.video.browser.refresh()
execute_video_steps(tab1_video_names)
@attr('shard_4')
class YouTubeHtml5VideoTest(VideoBaseTest):
""" Test YouTube HTML5 Video Player """
def setUp(self):
super(YouTubeHtml5VideoTest, self).setUp()
@flaky # TODO fix this, see TNL-1642
def test_youtube_video_rendering_with_unsupported_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode
        with HTML5 sources that are not supported by the browser
Given the course has a Video component in "Youtube_HTML5_Unsupported_Video" mode
Then the video has rendered in "Youtube" mode
"""
self.metadata = self.metadata_for_mode('youtube_html5_unsupported_video')
self.navigate_to_video()
# Verify that the video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
@attr('shard_4')
class Html5VideoTest(VideoBaseTest):
""" Test HTML5 Video Player """
def setUp(self):
super(Html5VideoTest, self).setUp()
def test_autoplay_disabled_for_video_component(self):
"""
Scenario: Autoplay is disabled by default for a Video component
Given the course has a Video component in "HTML5" mode
When I view the Video component
Then it does not have autoplay enabled
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
# Verify that the video has autoplay mode disabled
self.assertFalse(self.video.is_autoplay_enabled)
def test_html5_video_rendering_with_unsupported_sources(self):
"""
Scenario: LMS displays an error message for HTML5 sources that are not supported by browser
Given the course has a Video component in "HTML5_Unsupported_Video" mode
When I view the Video component
        Then an error message is shown
And the error message has the correct text
"""
self.metadata = self.metadata_for_mode('html5_unsupported_video')
self.navigate_to_video_no_render()
# Verify that error message is shown
self.assertTrue(self.video.is_error_message_shown)
# Verify that error message has correct text
correct_error_message_text = 'No playable video sources found.'
self.assertIn(correct_error_message_text, self.video.error_message_text)
# Verify that spinner is not shown
self.assertFalse(self.video.is_spinner_shown)
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# Then I can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_full_screen_video_alignment_with_transcript_visible(self):
"""
Scenario: Video is aligned correctly with transcript enabled in fullscreen mode
Given the course has a Video component in "HTML5" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I show the captions
And I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
def test_cc_button_with_english_transcript(self):
"""
Scenario: CC button works correctly with only english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined english subtitles for the video
And I have uploaded an english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "Welcome to edX." text in the captions
self.assertIn("Welcome to edX.", self.video.captions_text)
def test_cc_button_wo_english_transcript(self):
"""
Scenario: CC button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('chinese_transcripts.srt')
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_video_rendering(self):
"""
Scenario: Video component is fully rendered in the LMS in HTML5 mode
Given the course has a Video component in "HTML5" mode
Then the video has rendered in "HTML5" mode
And video sources are correct
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
self.assertTrue(all([source in HTML5_SOURCES for source in self.video.sources]))
@attr('shard_4')
class YouTubeQualityTest(VideoBaseTest):
""" Test YouTube Video Quality Button """
def setUp(self):
super(YouTubeQualityTest, self).setUp()
@skip_if_browser('firefox')
def test_quality_button_visibility(self):
"""
Scenario: Quality button appears on play.
Given the course has a Video component in "Youtube" mode
Then I see video button "quality" is hidden
And I click video button "play"
Then I see video button "quality" is visible
"""
self.navigate_to_video()
self.assertFalse(self.video.is_quality_button_visible)
self.video.click_player_button('play')
self.video.wait_for(lambda: self.video.is_quality_button_visible, 'waiting for quality button to appear')
@skip_if_browser('firefox')
def test_quality_button_works_correctly(self):
"""
Scenario: Quality button works correctly.
Given the course has a Video component in "Youtube" mode
And I click video button "play"
And I see video button "quality" is inactive
And I click video button "quality"
Then I see video button "quality" is active
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for(lambda: self.video.is_quality_button_visible, 'waiting for quality button to appear')
self.assertFalse(self.video.is_quality_button_active)
self.video.click_player_button('quality')
self.video.wait_for(lambda: self.video.is_quality_button_active, 'waiting for quality button activation')
@attr('shard_4')
class DragAndDropTest(VideoBaseTest):
"""
Tests draggability of closed captions within videos.
"""
def setUp(self):
super(DragAndDropTest, self).setUp()
def test_if_captions_are_draggable(self):
"""
Loads transcripts so that closed-captioning is available.
Ensures they are draggable by checking start and dropped location.
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
self.video.show_closed_captions()
self.video.wait_for_closed_captions()
self.assertTrue(self.video.is_closed_captions_visible)
action = ActionChains(self.browser)
captions = self.browser.find_element(By.CLASS_NAME, 'closed-captions')
captions_start = captions.location
action.drag_and_drop_by_offset(captions, 0, -15).perform()
captions_end = captions.location
# We have to branch here due to unexpected behaviour of chrome.
# Chrome sets the y offset of element to 834 instead of 650
if self.browser.name == 'chrome':
self.assertEqual(
captions_end.get('y') - 168,
captions_start.get('y'),
'Closed captions did not get dragged.'
)
else:
self.assertEqual(
captions_end.get('y') + 15,
captions_start.get('y'),
'Closed captions did not get dragged.'
)
@attr('a11y')
class LMSVideoModuleA11yTest(VideoBaseTest):
"""
LMS Video Accessibility Test Class
"""
def setUp(self):
browser = os.environ.get('SELENIUM_BROWSER', 'firefox')
# the a11y tests run in CI under phantomjs which doesn't
# support html5 video or flash player, so the video tests
# don't work in it. We still want to be able to run these
# tests in CI, so override the browser setting if it is
# phantomjs.
if browser == 'phantomjs':
browser = 'firefox'
with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
super(LMSVideoModuleA11yTest, self).setUp()
def test_video_player_a11y(self):
# load transcripts so we can test skipping to
self.assets.extend(['english_single_transcript.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"en": "english_single_transcript.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
# limit the scope of the audit to the video player only.
self.video.a11y_audit.config.set_scope(
include=["div.video"]
)
self.video.a11y_audit.config.set_rules({
"ignore": [
'section', # TODO: AC-491
]
})
self.video.a11y_audit.check_for_accessibility_errors()
|
waheedahmed/edx-platform
|
common/test/acceptance/tests/video/test_video_module.py
|
Python
|
agpl-3.0
| 51,809
|
[
"VisIt"
] |
fcd681316fb561335e714ceb8b20da88620aed49c75c97c7eaeed7949daeeda1
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007-2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
"""Schema migration
"""
import fnmatch
import logging
import os
import re
import shutil
import sys
import tempfile
import traceback
from kiwi.environ import environ
from stoqlib.database.runtime import (get_default_store,
new_store)
from stoqlib.database.settings import db_settings, check_extensions
from stoqlib.domain.plugin import InstalledPlugin
from stoqlib.domain.profile import update_profile_applications
from stoqlib.exceptions import (DatabaseInconsistency, StoqlibError,
DatabaseError)
from stoqlib.lib.crashreport import collect_traceback
from stoqlib.lib.defaults import stoqlib_gettext
from stoqlib.lib.message import error, info
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.pluginmanager import get_plugin_manager
_ = stoqlib_gettext
log = logging.getLogger(__name__)
# Used by the wizard
create_log = logging.getLogger('stoqlib.database.create')
class Patch(object):
"""A Database Patch
:attribute filename: patch filename
:attribute level: database level
"""
def __init__(self, filename, migration):
"""
Create a new Patch object.
:param filename:
:param migration
"""
self.filename = filename
# Base is the part of the filename minus the extension
base = os.path.basename(filename).split('.')[0]
# "patch-00-20" -> (00, 20): (generation, level)
base_parts = base.split('-', 2)
self.generation = int(base_parts[1])
self.level = int(base_parts[2])
self._migration = migration
def __cmp__(self, other):
return cmp(self.get_version(), other.get_version())
def apply(self, store):
"""Apply the patch
:param store: a store
"""
        # Don't lock the database here, since StoqlibSchemaMigration.update has
        # already done that before starting to apply the patches
# SQL statement to update the system_table
sql = self._migration.generate_sql_for_patch(self)
if self.filename.endswith('.sql'):
# Create a temporary file used for writing SQL statements
temporary = tempfile.mktemp(prefix="patch-%d-%d-" % self.get_version())
# Overwrite the temporary file with the sql patch we want to apply
shutil.copy(self.filename, temporary)
# After successfully executing the SQL statements, we need to
# make sure that the system_table is updated with the correct
# schema generation and patchlevel
open(temporary, 'a').write(sql)
retcode = db_settings.execute_sql(temporary)
if retcode != 0:
error('Failed to apply %s, psql returned error code: %d' % (
os.path.basename(self.filename), retcode))
os.unlink(temporary)
elif self.filename.endswith('.py'):
# Execute the patch, we cannot use __import__() since there are
# hyphens in the filename and data/sql lacks an __init__.py
ns = {}
execfile(self.filename, ns, ns)
function = ns['apply_patch']
# Create a new store that will be used to apply the patch and
# to update the system tables after the patch has been successfully
# applied
patch_store = new_store()
# Apply the patch itself
function(patch_store)
# After applying the patch, update the system_table within the same
# transaction
patch_store.execute(sql)
patch_store.commit(close=True)
else:
raise AssertionError("Unknown filename: %s" % (self.filename, ))
def get_version(self):
"""Returns the patch version
:returns: a tuple with the patch generation and level
"""
return self.generation, self.level
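# Illustrative sketch (not part of the original module): how a patch filename
# maps to the (generation, level) tuple returned by get_version(), following
# the parsing done in Patch.__init__ above. The filename "patch-02-15.sql" is
# a hypothetical example.
#
#   base = "patch-02-15"          # basename minus the extension
#   base.split('-', 2)            # -> ["patch", "02", "15"]
#   generation, level = 2, 15     # so get_version() returns (2, 15)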
class SchemaMigration(object):
"""Schema migration management
Is currently doing the following things:
- Applies database patches
- Makes sure that all parameters are present
- Makes sure that all applications are present
"""
patch_resource_domain = None
patch_resource = None
patch_patterns = ["patch*.sql", "patch*.py"]
def __init__(self):
if self.patch_resource is None:
raise ValueError(
_("%s needs to have the patch_resource class variable set") % (
self.__class__.__name__))
if self.patch_patterns is None:
raise ValueError(
_("%s needs to have the patch_patterns class variable set") % (
self.__class__.__name__))
self.default_store = get_default_store()
try:
check_extensions(store=self.default_store)
except ValueError:
error("Missing PostgreSQL extension on the server, "
"please install postgresql-contrib")
def _patchname_is_valid(self, filename):
# simple checking of the patch naming convention
valid_patterns = [r"patch-\d\d-\d\d.sql",
r"patch-\d\d-\d\d.py"]
for valid_pattern in valid_patterns:
if re.match(valid_pattern, filename) is not None:
return True
return False
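    # Examples (assumed, for illustration): "patch-03-12.sql" and
    # "patch-03-12.py" match the patterns above, while "patch-3-12.sql" does
    # not, because the regexes require exactly two digits per field.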
def _get_patches(self):
patches = []
for filename in environ.get_resource_names(self.patch_resource_domain,
self.patch_resource):
for pattern in self.patch_patterns:
if not fnmatch.fnmatch(filename, pattern):
continue
if not self._patchname_is_valid(filename):
print("Invalid patch name: %s" % filename)
continue
filename = environ.get_resource_filename(
self.patch_resource_domain, self.patch_resource, filename)
patches.append(Patch(filename, self))
return sorted(patches)
def _update_schema(self):
"""Check the current version of database and update the schema if
it's needed
"""
log.info("Updating schema")
if self.check_uptodate():
log.info("Schema is already up to date")
return
patches = self._get_patches()
latest_available = patches[-1].get_version()
current_version = self.get_current_version()
last_level = None
if current_version != latest_available:
patches_to_apply = []
for patch in patches:
if patch.get_version() <= current_version:
continue
patches_to_apply.append(patch)
from stoqlib.database.admin import create_database_functions
create_database_functions()
log.info("Applying %d patches" % (len(patches_to_apply), ))
create_log.info("PATCHES:%d" % (len(patches_to_apply), ))
for patch in patches_to_apply:
create_log.info("PATCH:%d.%d" % (patch.generation,
patch.level))
patch.apply(self.default_store)
assert patches_to_apply
log.info("All patches (%s) applied." % (
', '.join(str(p.level) for p in patches_to_apply)))
last_level = patches_to_apply[-1].get_version()
self.after_update()
return current_version, last_level
# Public API
def check(self, check_plugins=True):
if self.check_uptodate():
return True
if not check_plugins:
return True
if self.check_plugins():
return True
error(_("Database schema error"),
_("The database schema has changed, but the database has "
"not been updated. Run 'stoqdbadmin updateschema` to "
"update the schema to the latest available version."))
return False
def check_uptodate(self):
"""
Verify if the schema is up to date.
:returns: True or False.
"""
        # Fetch the latest patch, i.e. the last one in the list
patches = self._get_patches()
latest_available = patches[-1].get_version()
current_version = self.get_current_version()
if current_version == latest_available:
return True
elif current_version > latest_available:
current = "(%d.%d)" % current_version
latest = "(%d.%d)" % latest_available
raise DatabaseInconsistency(
_('The current version of database %s is greater than the '
'latest available version %s. Try upgrading your '
'installation.') % (current, latest))
return False
def _log(self, msg):
create_log.info(msg)
def apply_all_patches(self):
"""Apply all available patches
"""
log.info("Applying all patches")
current_version = self.get_current_version()
to_apply = []
for patch in self._get_patches():
if patch.get_version() > current_version:
to_apply.append(patch)
self._log("PATCHES:%d" % (len(to_apply), ))
for i, patch in enumerate(to_apply):
self._log("PATCH:%d" % (i, ))
patch.apply(self.default_store)
self._log("PATCHES APPLIED")
def update(self):
"""Updates the database schema
"""
if self.check_uptodate():
print('Database is already at the latest version %d.%d' % (
self.get_current_version()))
else:
from_, to = self._update_schema()
if to is None:
print('Database schema is already up to date')
else:
f = "(%d.%d)" % from_
t = "(%d.%d)" % to
print('Database schema updated from %s to %s' % (f, t))
def get_current_version(self):
"""This method is revision for returning the database schema version
for a migration subclass
This must be implemented in a subclass
:returns: the current database patch version
"""
raise NotImplementedError
def generate_sql_for_patch(self, patch):
"""This method is responsible for creating an SQL
statement which is used to update the migration versioning
information
This must be implemented in a subclass
:param patch: the patch that was applied
:returns: an SQL string
"""
raise NotImplementedError
def after_update(self):
"""This can be implemented in a subclass, but it is not mandatory.
It'll be called after applying all patches
"""
class StoqlibSchemaMigration(SchemaMigration):
"""This is a SchemaMigration subclass used by Stoqlib.
It's responsible for migrating the data for stoqlib itself
and all its plugins
"""
patch_resource_domain = 'stoq'
patch_resource = 'sql'
def check_uptodate(self):
retval = super(StoqlibSchemaMigration, self).check_uptodate()
        # If the database already needs upgrading, don't check the parameters
# presence (since they may need an upgrade as well)
if retval and not sysparam.check_parameter_presence():
return False
return retval
def update(self, plugins=True, backup=True):
log.info("Upgrading database (plugins=%r, backup=%r)" % (
plugins, backup))
try:
log.info("Locking database")
self.default_store.lock_database()
except DatabaseError:
msg = _('Could not lock database. This means there are other clients '
'connected. Make sure to close every Stoq client '
'before updating the database')
error(msg)
        # Database migration is actually run in subprocesses, so we need to
        # unlock the tables again and let the upgrade continue
log.info("Releasing database lock")
self.default_store.unlock_database()
        success = db_settings.test_connection()
        if not success:
# FIXME: Improve this message after 1.5 is released
msg = _(u'Could not connect to the database using command line '
'tool! Aborting.') + ' '
msg += _(u'Please, check if you can connect to the database '
'using:') + ' '
msg += _(u'psql -l -h <server> -p <port> -U <username>')
error(msg)
return
if backup:
temporary = tempfile.mktemp(prefix="stoq-dump-")
log.info("Making a backup to %s" % (temporary, ))
create_log.info("BACKUP-START:")
success = db_settings.dump_database(temporary)
if not success:
info(_(u'Could not create backup! Aborting.'))
info(_(u'Please contact stoq team to inform this problem.\n'))
return
# We have to wrap a try/except statement inside a try/finally to
        # support Python versions prior to 2.5.
try:
try:
super(StoqlibSchemaMigration, self).update()
if plugins:
self.update_plugins()
except Exception:
exc = sys.exc_info()
tb_str = ''.join(traceback.format_exception(*exc))
collect_traceback(exc, submit=True)
create_log.info("ERROR:%s" % (tb_str, ))
if backup:
log.info("Restoring backup %s" % (temporary, ))
create_log.info("RESTORE-START:")
new_name = db_settings.restore_database(temporary)
create_log.info("RESTORE-DONE:%s" % (new_name, ))
return False
finally:
if backup is True:
os.unlink(temporary)
log.info("Migration done")
return True
def _get_plugins(self):
manager = get_plugin_manager()
for plugin_name in manager.installed_plugins_names:
if plugin_name in manager.available_plugins_names:
yield manager.get_plugin(plugin_name)
def update_plugins(self):
for plugin in self._get_plugins():
migration = plugin.get_migration()
if migration:
migration.update()
def check_plugins(self):
# This cannot be done in check_uptodate since the plugin domain
        # classes were introduced as a patch and because of the way the
        # callsites work in stoq/lib/startup.py
for plugin in self._get_plugins():
migration = plugin.get_migration()
if not migration:
continue
if not migration.check_uptodate():
return False
return True
def get_current_version(self):
result = self.default_store.execute(
"""SELECT generation, patchlevel
FROM system_table
ORDER BY updated DESC
LIMIT 1;""")
value = result.get_one()
result.close()
return value
def after_update(self):
# checks if there is new applications and update all the user
# profiles on the system
store = new_store()
update_profile_applications(store)
# Updating the parameter list
sysparam.ensure_system_parameters(store, update=True)
store.commit(close=True)
def generate_sql_for_patch(self, patch):
return self.default_store.quote_query(
"INSERT INTO system_table (updated, patchlevel, generation)"
"VALUES (NOW(), %s, %s);", (patch.level,
patch.generation))
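    # For illustration (assumed values): for a patch with generation 0 and
    # level 5, the statement generated above would look roughly like:
    #   INSERT INTO system_table (updated, patchlevel, generation)
    #   VALUES (NOW(), 5, 0);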
class PluginSchemaMigration(SchemaMigration):
"""This is a SchemaMigration class which is suitable for use within
a plugin
"""
def __init__(self, plugin_name, resource_domain, resource, patterns):
"""
Create a new PluginSchemaMigration object.
:param plugin_name: name of the plugin
:param resource: resource to load sql patches from
:param patterns: sql patch pattern
"""
self.plugin_name = plugin_name
self.patch_resource_domain = resource_domain
self.patch_resource = resource
self.patch_patterns = patterns
SchemaMigration.__init__(self)
self._plugin = self.default_store.find(
InstalledPlugin, plugin_name=self.plugin_name).one()
def _log(self, msg):
create_log.info('PLUGIN ' + msg)
def generate_sql_for_patch(self, patch):
assert self._plugin
return self.default_store.quote_query(
"UPDATE installed_plugin "
"SET plugin_version = %s "
"WHERE id = %s;",
(patch.level, self._plugin.id))
def get_current_version(self):
if self._plugin:
return (0, self._plugin.plugin_version)
return (0, 0)
def needs_schema_update():
try:
migration = StoqlibSchemaMigration()
except StoqlibError:
error(_("Update Error"),
_("You need to call setup() before checking the database "
"schema."))
try:
update = not (migration.check_uptodate() and migration.check_plugins())
except DatabaseInconsistency as e:
error(str(e))
# If we need to update the database, we need to close the connection,
# otherwise the locking of the database will fail, since this connection has
# already queried a few tables
if update:
migration.default_store.commit()
return update
|
andrebellafronte/stoq
|
stoqlib/database/migration.py
|
Python
|
gpl-2.0
| 18,691
|
[
"VisIt"
] |
ca9f383f612d6eaf1ab65c0032cfc491e15cb75d9b0eb8766146882fa0912332
|
from __future__ import division # no integer division
import numpy as np
from numpy import linalg as LA
def getGCF(pt, power, corrlength):
"""Compute the generalized Gaussian covariance matrix of the form:
    $$Q = \exp(-(h/l)^p)$$
** input **
pt: numpy.array(x_dim, dim)
location of each grid block
power: numpy.array(1)
p in the equation above.
p = 1: Exponential
p = 2: Gaussian
corrlength: numpy.array(1)
correlation length parameter
"""
# Check the first argument pt
x_dim = pt.shape[0]
try:
dim = pt.shape[1]
    except IndexError:
        dim = 1
    except:
        print "The first argument must be a numpy array"
        raise
assert dim > 0 and dim <= 3
assert x_dim > 0
assert corrlength > 0
assert power >= 0
# compute distance between two points
h = np.zeros([x_dim, x_dim])
for i in range(dim):
if dim == 1:
[PT1, PT2] = np.meshgrid(pt, pt)
else:
[PT1, PT2] = np.meshgrid(pt[:,i], pt[:,i])
h = h + (PT1 - PT2)**2
h = np.sqrt(h)
# compute the covariance matrix
Q = np.exp(-np.power(h/corrlength,power))
return Q
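# Minimal usage sketch (not part of the original module). With two points one
# unit apart, correlation length l = 1 and power p = 2, the off-diagonal
# entries of Q equal exp(-1) ~= 0.3679:
#
#   pts = np.array([0.0, 1.0])
#   Q = getGCF(pts, 2, 1.0)
#   # Q ~= [[1.0   , 0.3679],
#   #       [0.3679, 1.0   ]]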
def testFun():
"""Test functions in common.py"""
x1 = np.array([1,2,3])
x2 = np.array([[1,2,3],
[3,4,5]])
p, l = 1, 0.5
Q1 = getGCF(x1,p,l)
Q2 = getQ(x1,l,p)
Q3 = getGCF(x2,p,l)
Q4 = getQ(x2,l,p)
    print np.allclose(Q1, Q2)
    print np.allclose(Q3, Q4)
def getQ(x,l,p):
Q = np.zeros((x.shape[0],x.shape[0]))
for i in range(len(x)):
for j in range(len(x)):
h = LA.norm(x[i]-x[j])
Q[i][j] = np.exp(-(h/l)**p)
return Q
if __name__ == '__main__':
testFun()
|
judithyueli/pyFKF
|
common.py
|
Python
|
mit
| 1,581
|
[
"Gaussian"
] |
249a7e07a0b2b11b9666e701c15929f113ee1698c9eeb4193f65354877b2737e
|
from __future__ import print_function
import traceback
import newspaper
import re
import time
from Data2File import getDataBasePath, article2file
class NPSpyder:
# Initialize a source object defining the source website and the regex to filter articles
# name: ID to be used to name folders etc.
# sourceURL: Initial URL from where the spider will start to craw. Ex.: http://www.bloomberg.com/
# regex: This regex will be applied to filter relevant URLs to be downloaded. Ex.: re.compile('.*(politics|economy).*', re.IGNORECASE)
def __init__(self, name, sourceURL, regex, firstRun=0):
        # Source's name to create a path where data will be saved.
self.sourceName = name
# Building a Newspapper crawler...
# defines the source website object from where the news are to be retrieved
if (firstRun == 1):
self.source = newspaper.build(sourceURL, language='en', fetch_images=False, memoize_articles=False)
else:
self.source = newspaper.build(sourceURL, language='en', fetch_images=False)
# Define a regex to filter relevant article`s URLs
self.filterRegex = regex
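    # Hypothetical usage sketch, reusing the example URL and regex from the
    # comments above __init__ (not part of the original code):
    #
    #   spider = NPSpyder('bloomberg', 'http://www.bloomberg.com/',
    #                     re.compile('.*(politics|economy).*', re.IGNORECASE))
    #   spider.visit()  # print candidate URLs first, to tune the regex
    #   spider.run()    # then download and save the matching articles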
# Will visit one by one of the articles available on the website, downloading pages that satisfies the filter regex
def run(self):
# Defining the path to save files
basepath = getDataBasePath() + "/" + self.sourceName + "/"
start_time = time.time()
print("INFO - Crawling "+ self.sourceName +".")
# Progress controlers
counter = 0
visited = 0
concluded = 0
total = self.source.size()
# Filtering relevant articles
for article in self.source.articles:
visited += 1
concluded = (float(visited) / total) * 100
            # Articles that already have a title have been processed before; skip them
            if article.title:
                print("WARN - %s %.0f%% concluded, skipping URL: %s" %(self.sourceName, concluded, article.url))
                continue
            # Complementing the path with the month folder: yyyyMM
try:
path = basepath + article.publish_date.strftime('%Y%m') + "/"
except:
# If cannot retrieve publish date defaults to the day it has been captured
path = basepath + time.strftime('%Y%m', time.gmtime()) + "/"
try:
if self.filterRegex.match(str(article.url)):
print("INFO - %s %.0f%% concluded, downloading URL: %s" %(self.sourceName, concluded, article.url))
article.download()
article.parse()
article2file(article, path)
counter += 1
except:
# Common exception is special char on URL, try to recover by removing special chars before capture
try:
                    theURL = re.sub(r'\W+', '', article.url)
if self.filterRegex.match(str(theURL)):
print("INFO - %s %.0f%% concluded, downloading URL: %s" %(self.sourceName, concluded, article.url))
article.download()
article.parse()
article2file(article, path)
counter += 1
except:
print("WARN - Cannot apply filter due to special char in URL: %s" %(article.url))
traceback.print_exc()
print("INFO - Done. %d out of %d articles saved to \"%s\"." %(counter, total, path))
elapsed_time = time.time() - start_time
print("INFO - %s crawled in %d seconds." %(self.sourceName, elapsed_time))
return counter
# Will visit and print all URLs available on the website without downloading.
    # Useful for an initial analysis before defining the filter regex, and for evaluating the filter later.
def visit(self):
print("INFO - Visiting "+ self.sourceName +".")
# Filtering relevant articles
for article in self.source.articles:
try:
if self.filterRegex.match(str(article.url)):
print("INFO - %s" %(article.url))
except:
# Common exception is special char on URL, try to recover by removing special chars before capture
try:
                    theURL = re.sub(r'\W+', '', article.url)
if self.filterRegex.match(str(theURL)):
print("INFO - %s" %(article.url))
except:
print("WARN - Cannot apply filter due to special char in URL: %s" %(article.url))
return 0
|
brvnl/master
|
crawler/NPSpyder.bkp.py
|
Python
|
gpl-3.0
| 4,620
|
[
"VisIt"
] |
6ab9530a9b008397c66cfbd33c99545cf900b1e77392332e1b5ac43f785a038d
|
#!/usr/bin/env python
########################################################################
# File : dirac-framework-ping-service
# Author : Stuart Paterson
########################################################################
"""
Ping the given DIRAC Service
Example:
$ dirac-framework-ping-service WorkloadManagement PilotManager
{'OK': True,
'Value': {'cpu times': {'children system time': 0.0,
'children user time': 0.0,
'elapsed real time': 8778481.7200000007,
'system time': 54.859999999999999,
'user time': 361.06999999999999},
'host uptime': 4485212L,
'load': '3.44 3.90 4.02',
'name': 'WorkloadManagement/PilotManager',
'service start time': datetime.datetime(2011, 2, 21, 8, 58, 35, 521438),
'service uptime': 85744,
'service url': 'dips://dirac.in2p3.fr:9171/WorkloadManagement/PilotManager',
'time': datetime.datetime(2011, 3, 14, 11, 47, 40, 394957),
'version': 'v5r12-pre9'},
'rpcStub': (('WorkloadManagement/PilotManager',
{'delegatedDN': '/O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar',
'delegatedGroup': 'dirac_user',
'skipCACheck': True,
'timeout': 120}),
'ping',
())}
"""
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(
(
"URL: URL of the service to ping (instead of System and Service)",
"System/Service: Full component name (ie: WorkloadManagement/Matcher)",
"System: Name of the DIRAC system (ie: WorkloadManagement)",
)
)
Script.registerArgument(" Service: Name of the DIRAC service (ie: Matcher)", mandatory=False)
_, args = Script.parseCommandLine(ignoreErrors=True)
system = None
service = None
url = None
if len(args) == 1:
# it is a URL
if args[0].startswith("dips://"):
url = args[0]
# It is System/Service
else:
sys_serv = args[0].split("/")
if len(sys_serv) != 2:
Script.showHelp(exitCode=1)
else:
system, service = sys_serv
elif len(args) == 2:
system, service = args[0], args[1]
else:
Script.showHelp(exitCode=1)
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
result = dirac.pingService(system, service, printOutput=True, url=url)
if not result:
print("ERROR: Null result from ping()")
exitCode = 2
elif not result["OK"]:
print("ERROR: ", result["Message"])
exitCode = 2
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
ic-hep/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_framework_ping_service.py
|
Python
|
gpl-3.0
| 3,003
|
[
"DIRAC"
] |
946c0daacfd1f71d365892cde64d096a36ead009ca4c588aa2b215a41c7e9d32
|
""" FileManager for ... ?
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
import os
import datetime
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManager.FileManagerBase import FileManagerBase
from DIRAC.Core.Utilities.List import stringListToString, intListToString, breakListIntoChunks
# The logic of some methods is basically a copy/paste from the FileManager class,
# so I could have inherited from it. However, I did not want to depend on it
class FileManagerPs(FileManagerBase):
def __init__(self, database=None):
super(FileManagerPs, self).__init__(database)
######################################################
#
# The all important _findFiles and _getDirectoryFiles methods
#
def _findFiles(self, lfns, metadata=["FileID"], allStatus=False, connection=False):
"""Returns the information for the given lfns
The logic works nicely in the FileManager, so I pretty much copied it.
:param lfns: list of lfns
:param metadata: list of params that we want to get for each lfn
:param allStatus: consider all file status or only those defined in db.visibleFileStatus
        :returns: S_OK following the successful/failed convention. successful is a dict < lfn : dict of metadata >
"""
connection = self._getConnection(connection)
dirDict = self._getFileDirectories(lfns)
result = self.db.dtree.findDirs(list(dirDict))
if not result["OK"]:
return result
directoryIDs = result["Value"]
failed = {}
successful = {}
for dirPath in directoryIDs:
fileNames = dirDict[dirPath]
res = self._getDirectoryFiles(
directoryIDs[dirPath], fileNames, metadata, allStatus=allStatus, connection=connection
)
for fileName, fileDict in res.get("Value", {}).items():
fname = os.path.join(dirPath, fileName)
successful[fname] = fileDict
        # The lfns that are neither in successful nor in failed don't exist
for failedLfn in set(lfns) - set(successful):
failed.setdefault(failedLfn, "No such file or directory")
return S_OK({"Successful": successful, "Failed": failed})
def _findFileIDs(self, lfns, connection=False):
"""Find lfn <-> FileID correspondence"""
connection = self._getConnection(connection)
failed = {}
successful = {}
# If there is only one lfn, we might as well make a direct query
if len(lfns) == 1:
            lfn = list(lfns)[0]  # if lfns is a dict, list(lfns) gives its keys
pathPart, filePart = os.path.split(lfn)
result = self.db.executeStoredProcedure(
"ps_get_file_id_from_lfn", (pathPart, filePart, "ret1"), outputIds=[2]
)
if not result["OK"]:
return result
fileId = result["Value"][0]
if not fileId:
failed[lfn] = "No such file or directory"
else:
successful[lfn] = fileId
else:
# We separate the files by directory
filesInDirDict = self._getFileDirectories(lfns)
# We get the directory ids
result = self.db.dtree.findDirs(list(filesInDirDict))
if not result["OK"]:
return result
directoryPathToIds = result["Value"]
# For each directory, we get the file ids of the files we want
for dirPath in directoryPathToIds:
fileNames = filesInDirDict[dirPath]
dirID = directoryPathToIds[dirPath]
formatedFileNames = stringListToString(fileNames)
result = self.db.executeStoredProcedureWithCursor(
"ps_get_file_ids_from_dir_id", (dirID, formatedFileNames)
)
if not result["OK"]:
return result
for fileID, fileName in result["Value"]:
fname = os.path.join(dirPath, fileName)
successful[fname] = fileID
        # The lfns that are not in successful don't exist
for failedLfn in set(lfns) - set(successful):
failed[failedLfn] = "No such file or directory"
return S_OK({"Successful": successful, "Failed": failed})
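    # Illustrative result shape (hypothetical lfns and ids), following the
    # Successful/Failed convention used throughout DIRAC:
    #   {'Successful': {'/some/dir/file.txt': 42},
    #    'Failed': {'/some/dir/missing.txt': 'No such file or directory'}}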
def _getDirectoryFiles(self, dirID, fileNames, metadata_input, allStatus=False, connection=False):
"""For a given directory, and eventually given file, returns all the desired metadata
:param int dirID: directory ID
:param fileNames: the list of filenames, or []
:param metadata_input: list of desired metadata.
It can be anything from (FileName, DirID, FileID, Size, UID, Owner,
GID, OwnerGroup, Status, GUID, Checksum, ChecksumType, Type, CreationDate, ModificationDate, Mode)
:param bool allStatus: if False, only displays the files whose status is in db.visibleFileStatus
:returns: S_OK(files), where files is a dictionary indexed on filename, and values are dictionary of metadata
"""
connection = self._getConnection(connection)
metadata = list(metadata_input)
if "UID" in metadata:
metadata.append("Owner")
if "GID" in metadata:
metadata.append("OwnerGroup")
if "FileID" not in metadata:
metadata.append("FileID")
        # Format the filenames and status to be used in an IN clause in the stored procedure
formatedFileNames = stringListToString(fileNames)
fStatus = stringListToString(self.db.visibleFileStatus)
        specificFiles = bool(fileNames)
result = self.db.executeStoredProcedureWithCursor(
"ps_get_all_info_for_files_in_dir", (dirID, specificFiles, formatedFileNames, allStatus, fStatus)
)
if not result["OK"]:
return result
fieldNames = [
"FileName",
"DirID",
"FileID",
"Size",
"UID",
"Owner",
"GID",
"OwnerGroup",
"Status",
"GUID",
"Checksum",
"ChecksumType",
"Type",
"CreationDate",
"ModificationDate",
"Mode",
]
rows = result["Value"]
files = {}
for row in rows:
rowDict = dict(zip(fieldNames, row))
fileName = rowDict["FileName"]
# Returns only the required metadata
files[fileName] = dict((key, rowDict.get(key, "Unknown metadata field")) for key in metadata)
return S_OK(files)
def _getFileMetadataByID(self, fileIDs, connection=False):
"""Get standard file metadata for a list of files specified by FileID
        :param fileIDs : list of file IDs
:returns: S_OK(files), where files is a dictionary indexed on fileID
and the values dictionaries containing the following info:
["FileID", "Size", "UID", "GID", "s.Status", "GUID", "CreationDate"]
"""
        # Format the file IDs to be used in an IN clause in the stored procedure
formatedFileIds = intListToString(fileIDs)
result = self.db.executeStoredProcedureWithCursor("ps_get_all_info_for_file_ids", (formatedFileIds,))
if not result["OK"]:
return result
rows = result["Value"]
fieldNames = ["FileID", "Size", "UID", "GID", "s.Status", "GUID", "CreationDate"]
resultDict = {}
for row in rows:
rowDict = dict(zip(fieldNames, row))
rowDict["Size"] = int(rowDict["Size"])
rowDict["UID"] = int(rowDict["UID"])
rowDict["GID"] = int(rowDict["GID"])
resultDict[rowDict["FileID"]] = rowDict
return S_OK(resultDict)
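    # Illustrative sketch (not from the original source) of the mapping
    # returned by _getFileMetadataByID, indexed on FileID (values made up):
    #
    #   {101: {"FileID": 101, "Size": 2048, "UID": 42, "GID": 7,
    #          "s.Status": "AprioriGood", "GUID": "...", "CreationDate": ...}}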
def __insertMultipleFiles(self, allFileValues, wantedLfns):
"""Insert multiple files in one query. However, if there is a problem
with one file, all the query is rolled back.
        :param allFileValues : dictionary of tuples with all the information, possibly
               about more files than we want to insert
:param wantedLfns : list of lfn that we want to insert
"""
fileValuesStrings = []
fileDescStrings = []
for lfn in wantedLfns:
dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, mode = allFileValues[lfn]
utcNow = datetime.datetime.utcnow().replace(microsecond=0)
fileValuesStrings.append(
"(%s, %s, %s, %s, %s, '%s', '%s', '%s', '%s', '%s', '%s', %s)"
% (dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, utcNow, utcNow, mode)
)
fileDescStrings.append("(DirID = %s AND FileName = '%s')" % (dirID, fileName))
fileValuesStr = ",".join(fileValuesStrings)
fileDescStr = " OR ".join(fileDescStrings)
result = self.db.executeStoredProcedureWithCursor("ps_insert_multiple_file", (fileValuesStr, fileDescStr))
return result
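    # Illustrative sketch (not from the original source) of the two strings
    # built above for a single file (all values made up):
    #
    #   fileValuesStr: "(12, 2048, 42, 7, 1, 'a.txt', '<guid>', '<checksum>',
    #                   'Adler32', '2024-01-01 00:00:00', '2024-01-01 00:00:00', 509)"
    #   fileDescStr:   "(DirID = 12 AND FileName = 'a.txt')"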
def __chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n]
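    # Illustrative sketch (not from the original source): the bulk insert
    # methods below use __chunks to cap the size of each generated IN clause,
    # e.g.
    #
    #   list(self.__chunks([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]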
def _insertFiles(self, lfns, uid, gid, connection=False):
"""Insert new files. lfns is a dictionary indexed on lfn, the values are
mandatory: DirID, Size, Checksum, GUID
optional : Owner (dict with username and group), ChecksumType (Adler32 by default), Mode (db.umask by default)
:param lfns : lfns and info to insert
        :param uid : user id, overwritten by Owner['username'] if defined
        :param gid : group id, overwritten by Owner['group'] if defined
"""
connection = self._getConnection(connection)
failed = {}
successful = {}
res = self._getStatusInt("AprioriGood", connection=connection)
if res["OK"]:
statusID = res["Value"]
else:
return res
lfnsToRetry = []
fileValues = {}
fileDesc = {}
# Prepare each file separately
for lfn in lfns:
# Get all the info
fileInfo = lfns[lfn]
dirID = fileInfo["DirID"]
fileName = os.path.basename(lfn)
size = fileInfo["Size"]
ownerDict = fileInfo.get("Owner", None)
checksum = fileInfo["Checksum"]
checksumtype = fileInfo.get("ChecksumType", "Adler32")
guid = fileInfo["GUID"]
mode = fileInfo.get("Mode", self.db.umask)
s_uid = uid
s_gid = gid
# overwrite the s_uid and s_gid if defined in the lfn info
if ownerDict:
result = self.db.ugManager.getUserAndGroupID(ownerDict)
if result["OK"]:
s_uid, s_gid = result["Value"]
fileValues[lfn] = (dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, mode)
fileDesc[(dirID, fileName)] = lfn
chunkSize = 200
allChunks = list(self.__chunks(list(lfns), chunkSize))
for lfnChunk in allChunks:
result = self.__insertMultipleFiles(fileValues, lfnChunk)
if result["OK"]:
allIds = result["Value"]
for dirId, fileName, fileID in allIds:
lfn = fileDesc[(dirId, fileName)]
successful[lfn] = lfns[lfn]
successful[lfn]["FileID"] = fileID
else:
lfnsToRetry.extend(lfnChunk)
# If we are here, that means that the multiple insert failed, so we do one by one
for lfn in lfnsToRetry:
dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, mode = fileValues[lfn]
# insert
result = self.db.executeStoredProcedureWithCursor(
"ps_insert_file", (dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, mode)
)
if not result["OK"]:
failed[lfn] = result["Message"]
else:
fileID = result["Value"][0][0]
successful[lfn] = lfns[lfn]
successful[lfn]["FileID"] = fileID
return S_OK({"Successful": successful, "Failed": failed})
def _getFileIDFromGUID(self, guids, connection=False):
"""Returns the file ids from list of guids
:param guids : list of guid
:returns dictionary < guid : fileId >
"""
connection = self._getConnection(connection)
if not guids:
return S_OK({})
if not isinstance(guids, (list, tuple)):
guids = [guids]
# formatedGuids = ','.join( [ '"%s"' % guid for guid in guids ] )
formatedGuids = stringListToString(guids)
result = self.db.executeStoredProcedureWithCursor("ps_get_file_ids_from_guids", (formatedGuids,))
if not result["OK"]:
return result
guidDict = dict((guid, fileID) for guid, fileID in result["Value"])
return S_OK(guidDict)
def getLFNForGUID(self, guids, connection=False):
"""Returns the lfns matching given guids"""
connection = self._getConnection(connection)
if not guids:
return S_OK({})
if not isinstance(guids, (list, tuple)):
guids = [guids]
formatedGuids = stringListToString(guids)
result = self.db.executeStoredProcedureWithCursor("ps_get_lfns_from_guids", (formatedGuids,))
if not result["OK"]:
return result
guidDict = dict((guid, lfn) for guid, lfn in result["Value"])
failedGuid = set(guids) - set(guidDict)
failed = dict.fromkeys(failedGuid, "GUID does not exist") if failedGuid else {}
return S_OK({"Successful": guidDict, "Failed": failed})
######################################################
#
# _deleteFiles related methods
#
def _deleteFiles(self, fileIDs, connection=False):
"""Delete a list of files and the associated replicas
        :param fileIDs : list of file IDs
:returns: S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
replicaPurge = self.__deleteFileReplicas(fileIDs)
filePurge = self.__deleteFiles(fileIDs, connection=connection)
if not replicaPurge["OK"]:
return replicaPurge
if not filePurge["OK"]:
return filePurge
return S_OK()
def __deleteFileReplicas(self, fileIDs, connection=False):
"""Delete all the replicas from the file ids
:param fileIDs: list of file ids
:returns: S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
if not fileIDs:
return S_OK()
formatedFileIds = intListToString(fileIDs)
result = self.db.executeStoredProcedureWithCursor("ps_delete_replicas_from_file_ids", (formatedFileIds,))
if not result["OK"]:
return result
errno, msg = result["Value"][0]
if errno:
return S_ERROR(msg)
return S_OK()
def __deleteFiles(self, fileIDs, connection=False):
"""Delete the files from their ids
:param fileIDs: list of file ids
:returns: S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
formatedFileIds = intListToString(fileIDs)
result = self.db.executeStoredProcedureWithCursor("ps_delete_files", (formatedFileIds,))
if not result["OK"]:
return result
errno, msg = result["Value"][0]
if errno:
return S_ERROR(msg)
return S_OK()
def __insertMultipleReplicas(self, allReplicaValues, lfnsChunk):
"""Insert multiple replicas in one query. However, if there is a problem
with one replica, all the query is rolled back.
        :param allReplicaValues : dictionary of tuples with all the information, possibly
               about more replicas than we want to insert
:param lfnsChunk : list of lfn that we want to insert
"""
repValuesStrings = []
repDescStrings = []
for lfn in lfnsChunk:
fileID, seID, statusID, replicaType, pfn = allReplicaValues[lfn]
utcNow = datetime.datetime.utcnow().replace(microsecond=0)
repValuesStrings.append(
"(%s,%s,'%s','%s','%s','%s','%s')" % (fileID, seID, statusID, replicaType, utcNow, utcNow, pfn)
)
repDescStrings.append("(r.FileID = %s AND SEID = %s)" % (fileID, seID))
repValuesStr = ",".join(repValuesStrings)
repDescStr = " OR ".join(repDescStrings)
result = self.db.executeStoredProcedureWithCursor("ps_insert_multiple_replica", (repValuesStr, repDescStr))
return result
def _insertReplicas(self, lfns, master=False, connection=False):
"""Insert new replicas. lfns is a dictionary with one entry for each file. The keys are lfns, and values are dict
with mandatory attributes : FileID, SE (the name), PFN
:param lfns: lfns and info to insert
        :param master: True if they are master replicas, otherwise they will be just 'Replica'
        :return: successful/failed convention, with successful[lfn] = True
"""
chunkSize = 200
connection = self._getConnection(connection)
# Add the files
failed = {}
successful = {}
# Get the status id of AprioriGood
res = self._getStatusInt("AprioriGood", connection=connection)
if not res["OK"]:
return res
statusID = res["Value"]
lfnsToRetry = []
repValues = {}
repDesc = {}
        # treat each file one after the other
for lfn in lfns.keys():
fileID = lfns[lfn]["FileID"]
seName = lfns[lfn]["SE"]
if isinstance(seName, six.string_types):
seList = [seName]
elif isinstance(seName, list):
seList = seName
else:
return S_ERROR("Illegal type of SE list: %s" % str(type(seName)))
replicaType = "Master" if master else "Replica"
pfn = lfns[lfn]["PFN"]
            # treat each replica of a file one after the other
            # (THIS CANNOT WORK... WE ARE ONLY CAPABLE OF DOING ONE REPLICA PER FILE AT A TIME)
for seName in seList:
# get the SE id
res = self.db.seManager.findSE(seName)
if not res["OK"]:
failed[lfn] = res["Message"]
continue
seID = res["Value"]
                # This is incompatible with adding multiple replicas at a time for a given file
repValues[lfn] = (fileID, seID, statusID, replicaType, pfn)
repDesc[(fileID, seID)] = lfn
allChunks = list(self.__chunks(list(lfns), chunkSize))
for lfnChunk in allChunks:
result = self.__insertMultipleReplicas(repValues, lfnChunk)
if result["OK"]:
allIds = result["Value"]
for fileId, seId, repId in allIds:
lfn = repDesc[(fileId, seId)]
successful[lfn] = True
lfns[lfn]["RepID"] = repId
else:
lfnsToRetry.extend(lfnChunk)
for lfn in lfnsToRetry:
fileID, seID, statusID, replicaType, pfn = repValues[lfn]
# insert the replica and its info
result = self.db.executeStoredProcedureWithCursor(
"ps_insert_replica", (fileID, seID, statusID, replicaType, pfn)
)
if not result["OK"]:
failed[lfn] = result["Message"]
else:
replicaID = result["Value"][0][0]
lfns[lfn]["RepID"] = replicaID
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def _getRepIDsForReplica(self, replicaTuples, connection=False):
"""Get the Replica IDs for (fileId, SEID) couples
        :param replicaTuples : list of (fileID, seID) couples
:returns { fileID : { seID : RepID } }
"""
connection = self._getConnection(connection)
replicaDict = {}
for fileID, seID in replicaTuples:
result = self.db.executeStoredProcedure("ps_get_replica_id", (fileID, seID, "repIdOut"), outputIds=[2])
if not result["OK"]:
return result
repID = result["Value"][0]
# if the replica exists, we add it to the dict
if repID:
replicaDict.setdefault(fileID, {}).setdefault(seID, repID)
return S_OK(replicaDict)
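    # Illustrative sketch (not from the original source): for replicaTuples
    # [(101, 3), (101, 5)], assuming both replicas exist, the returned value
    # would be S_OK({101: {3: 9001, 5: 9002}})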
######################################################
#
# _deleteReplicas related methods
#
def _deleteReplicas(self, lfns, connection=False):
"""Deletes replicas. The deletion of replicas that do not exist is successful
:param lfns : dictinary with lfns as key, and the value is a dict with a mandatory "SE" key,
corresponding to the SE name or SE ID
:returns: successful/failed convention, with successful[lfn] = True
"""
connection = self._getConnection(connection)
failed = {}
successful = {}
# First we get the fileIds from our lfns
res = self._findFiles(list(lfns), ["FileID"], connection=connection)
if not res["OK"]:
return res
# If the file does not exist we consider the deletion successful
for lfn, error in res["Value"]["Failed"].items():
if error == "No such file or directory":
successful[lfn] = True
else:
failed[lfn] = error
lfnFileIDDict = res["Value"]["Successful"]
for lfn, fileDict in lfnFileIDDict.items():
fileID = fileDict["FileID"]
# Then we get our StorageElement Id (cached in seManager)
se = lfns[lfn]["SE"]
# if se is already the se id, findSE will return it
res = self.db.seManager.findSE(se)
if not res["OK"]:
return res
seID = res["Value"]
# Finally remove the replica
result = self.db.executeStoredProcedureWithCursor("ps_delete_replica_from_file_and_se_ids", (fileID, seID))
if not result["OK"]:
failed[lfn] = result["Message"]
continue
errno, errMsg = result["Value"][0]
if errno:
failed[lfn] = errMsg
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
######################################################
#
# _setReplicaStatus _setReplicaHost _setReplicaParameter methods
# _setFileParameter method
#
def _setReplicaStatus(self, fileID, se, status, connection=False):
"""Set the status of a replica
:param fileID : file id
:param se : se name or se id
:param status : status to be applied
:returns: S_OK() or S_ERROR(msg)
"""
if status not in self.db.validReplicaStatus:
return S_ERROR("Invalid replica status %s" % status)
connection = self._getConnection(connection)
res = self._getStatusInt(status, connection=connection)
if not res["OK"]:
return res
statusID = res["Value"]
# Then we get our StorageElement Id (cached in seManager)
res = self.db.seManager.findSE(se)
if not res["OK"]:
return res
seID = res["Value"]
result = self.db.executeStoredProcedureWithCursor("ps_set_replica_status", (fileID, seID, statusID))
if not result["OK"]:
return result
        affected = result["Value"][0][0]  # Affected is the number of rows updated
if not affected:
return S_ERROR("Replica does not exist")
return S_OK()
def _setReplicaHost(self, fileID, se, newSE, connection=False):
"""Move a replica from one SE to another (I don't think this should be called
:param fileID : file id
:param se : se name or se id of the previous se
:param newSE : se name or se id of the new se
:returns: S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
# Get the new se id
res = self.db.seManager.findSE(newSE)
if not res["OK"]:
return res
newSEID = res["Value"]
# Get the old se id
res = self.db.seManager.findSE(se)
if not res["OK"]:
return res
oldSEID = res["Value"]
# update
result = self.db.executeStoredProcedureWithCursor("ps_set_replica_host", (fileID, oldSEID, newSEID))
if not result["OK"]:
return result
affected = result["Value"][0][0]
if not affected:
return S_ERROR("Replica does not exist")
else:
return S_OK()
def _setFileParameter(self, fileID, paramName, paramValue, connection=False):
"""Generic method to set a file parameter
:param fileID : id of the file
:param paramName : the file parameter you want to change
          It should be one of [UID, GID, Status, Mode]. However, for an
          unexpected parameter, and to stay compatible with the other Managers,
          a manual SQL update is issued.
:param paramValue : the value (raw, or id) to insert
:returns: S_OK() or S_ERROR
"""
connection = self._getConnection(connection)
# The PS associated with a given parameter
psNames = {
"UID": "ps_set_file_uid",
"GID": "ps_set_file_gid",
"Status": "ps_set_file_status",
"Mode": "ps_set_file_mode",
}
psName = psNames.get(paramName, None)
# If there is an associated procedure, we go for it
if psName:
result = self.db.executeStoredProcedureWithCursor(psName, (fileID, paramValue))
if not result["OK"]:
return result
_affected = result["Value"][0][0]
# If affected = 0, the file does not exist, but who cares...
        # In case this is a 'new' parameter, we have a fallback solution, but we
# should add a specific ps for it
else:
req = "UPDATE FC_Files SET %s='%s', ModificationDate=UTC_TIMESTAMP() WHERE FileID IN (%s)" % (
paramName,
paramValue,
intListToString(fileID),
)
return self.db._update(req, connection)
return S_OK()
######################################################
#
# _getFileReplicas related methods
#
def _getFileReplicas(self, fileIDs, fields_input=None, allStatus=False, connection=False):
"""Get replicas for the given list of files specified by their fileIDs
:param fileIDs : list of file ids
        :param fields_input : metadata of the Replicas we are interested in (defaults to PFN)
:param allStatus : if True, all the Replica statuses will be considered,
otherwise, only the db.visibleReplicaStatus
:returns S_OK with a dict { fileID : { SE name : dict of metadata } }
"""
if fields_input is None:
fields_input = ["PFN"]
fields = list(fields_input)
# always add Status in the list of required fields
if "Status" not in fields:
fields.append("Status")
# We initialize the dictionary with empty dict
# as default value, because this is what we want for
# non existing replicas
replicas = {fileID: {} for fileID in fileIDs}
        # Format the status to be used in an IN clause in the stored procedure
fStatus = stringListToString(self.db.visibleReplicaStatus)
fieldNames = ["FileID", "SE", "Status", "RepType", "CreationDate", "ModificationDate", "PFN"]
for chunks in breakListIntoChunks(fileIDs, 1000):
            # Format the FileIDs to be used in an IN clause in the stored procedure
formatedFileIds = intListToString(chunks)
result = self.db.executeStoredProcedureWithCursor(
"ps_get_all_info_of_replicas_bulk", (formatedFileIds, allStatus, fStatus)
)
if not result["OK"]:
return result
rows = result["Value"]
for row in rows:
rowDict = dict(zip(fieldNames, row))
se = rowDict["SE"]
fileID = rowDict["FileID"]
replicas[fileID][se] = dict((key, rowDict.get(key, "Unknown metadata field")) for key in fields)
return S_OK(replicas)
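    # Illustrative sketch (not from the original source) of the mapping
    # returned by _getFileReplicas for fields_input = ["PFN"]; note the empty
    # dict for a file without visible replicas:
    #
    #   {101: {"CERN-DISK": {"PFN": "/eos/...", "Status": "AprioriGood"}},
    #    102: {}}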
def countFilesInDir(self, dirId):
"""Count how many files there is in a given Directory
:param dirID: directory id
:returns: S_OK(value) or S_ERROR
"""
result = self.db.executeStoredProcedure("ps_count_files_in_dir", (dirId, "ret1"), outputIds=[1])
if not result["OK"]:
return result
res = S_OK(result["Value"][0])
return res
##########################################################################
#
# We overwrite some methods from the base class because of the new DB constraints or perf reasons
#
# Some methods could be inherited in the future if we have perf problems. For example
# * setFileGroup
# * setFileOwner
# * setFileMode
# * changePath*
#
##########################################################################
def _updateDirectoryUsage(self, directorySEDict, change, connection=False):
"""This updates the directory usage, but is now done by triggers in the DB"""
return S_OK()
def _computeStorageUsageOnRemoveFile(self, lfns, connection=False):
"""Again nothing to compute, all done by the triggers"""
directorySESizeDict = {}
return S_OK(directorySESizeDict)
# "REMARQUE : THIS IS STILL TRUE, BUT YOU MIGHT WANT TO CHECK FOR A GIVEN GUID ANYWAY
# def _checkUniqueGUID( self, lfns, connection = False ):
# """ The GUID unicity is ensured at the DB level, so we will have similar message if the insertion fails"""
#
# failed = {}
# return failed
def getDirectoryReplicas(self, dirID, path, allStatus=False, connection=False):
"""
This is defined in the FileManagerBase but it relies on the SEManager to get the SE names.
It is good practice in software, but since the SE and Replica tables are bound together in the DB,
        I might as well resolve the names in the query.
        Get the replicas for all the files in the given directory.
:param int dirID: ID of the directory
        :param path: unused
:param bool allStatus: whether all replicas and file status are considered
If False, take the visibleFileStatus and visibleReplicaStatus
values from the configuration
"""
        # We format the visible file/replica status so we can give it as an argument to the ps
# It is used in an IN clause, so it looks like --'"AprioriGood","Trash"'--
# fStatus = ','.join( [ '"%s"' % status for status in self.db.visibleFileStatus ] )
# rStatus = ','.join( [ '"%s"' % status for status in self.db.visibleReplicaStatus ] )
fStatus = stringListToString(self.db.visibleFileStatus)
rStatus = stringListToString(self.db.visibleReplicaStatus)
result = self.db.executeStoredProcedureWithCursor(
"ps_get_replicas_for_files_in_dir", (dirID, allStatus, fStatus, rStatus)
)
if not result["OK"]:
return result
resultDict = {}
for fileName, _fileID, seName, pfn in result["Value"]:
resultDict.setdefault(fileName, {}).setdefault(seName, []).append(pfn)
return S_OK(resultDict)
def _getFileLFNs(self, fileIDs):
"""Get the file LFNs for a given list of file IDs
We need to override this method because the base class hard codes the column names
"""
successful = {}
for chunks in breakListIntoChunks(fileIDs, 1000):
            # Format the file IDs to be used in an IN clause in the stored procedure
formatedFileIds = intListToString(chunks)
result = self.db.executeStoredProcedureWithCursor("ps_get_full_lfn_for_file_ids", (formatedFileIds,))
if not result["OK"]:
return result
# The result contains FileID, LFN
for row in result["Value"]:
successful[row[0]] = row[1]
missingIds = set(fileIDs) - set(successful)
failed = dict.fromkeys(missingIds, "File ID not found")
return S_OK({"Successful": successful, "Failed": failed})
def getSEDump(self, seName):
"""
Return all the files at a given SE, together with checksum and size
:param seName: name of the StorageElement
:returns: S_OK with list of tuples (lfn, checksum, size)
"""
res = self.db.seManager.findSE(seName)
if not res["OK"]:
return res
seID = res["Value"]
return self.db.executeStoredProcedureWithCursor("ps_get_se_dump", (seID,))
|
ic-hep/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/FileManager/FileManagerPs.py
|
Python
|
gpl-3.0
| 34,025
|
[
"DIRAC"
] |
58e9802d892ef9b686430a5911c8374b996a1f27566ecf1d74f33fab5cc3645a
|
import sys
import os
import copy
from subprocess import call
from rdkit import Chem
from rdkit.Chem import AllChem
import coot_svn_repo_revision
import pyrogen_swig as pysw
import pyrogen_boost
import atom_types
from optparse import OptionParser
import tautomer
import urllib
from jay_util import *
global pyrogen_version
pyrogen_version = "0.0-pre"
global run_mogul
global smiles_dict
run_mogul = True
smiles_dict = False
def make_mogul_ins_file(mogul_ins_file_name, mogul_out_file_name, sdf_file_name):
f = open(mogul_ins_file_name, 'w')
if f:
f.write('mogul molecule file ')
f.write(sdf_file_name)
f.write('\n')
f.write('mogul output file ')
f.write(mogul_out_file_name)
f.write('\n')
f.write('mogul output distribution all on\n')
f.write('bond all\n')
f.write('angle all\n')
# f.write('torsion all\n')
# f.write('ring all\n')
f.write('config output format CSV\n')
f.write('config output items fragment_type atom_indices query_value nhits mean median sd z-score dmin\n')
f.write('config search all filter exclude_solvents\n')
f.write('config output invalid_fragments exclude\n')
f.close()
return f
# return True for good, False for bad/not-run
#
def execute_mogul(sdf_file_name, mogul_ins_file_name, mogul_out_file_name):
f = make_mogul_ins_file(mogul_ins_file_name, mogul_out_file_name, sdf_file_name)
if f:
# print 'now run mogul using ins file %s' % mogul_ins_file_name
if run_mogul:
state = call(['mogul', '-ins', mogul_ins_file_name])
return (state == 0)
else:
return False
else:
return False
def atom_name_from_atomic_number_and_count(element, count):
name = element
name += str(count)
return name
def add_atom_names(mol):
nz = {}
atom_names = []
for atom in mol.GetAtoms():
try:
n = atom.GetProp('name')
atom_names.append(n)
except KeyError:
z = atom.GetAtomicNum()
if z in nz:
nz[z] = nz[z] + 1
else:
                nz[z] = 1
ele = atom.GetSymbol().upper()
name = atom_name_from_atomic_number_and_count(ele, nz[z])
p_name = pad_atom_name(name, ele)
atom.SetProp("name", p_name)
atom_names.append(p_name)
return atom_names
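# Illustrative sketch (not from the original source): for ethanol built from
# the SMILES CCO, with no pre-existing 'name' properties, add_atom_names()
# would generate and attach the padded names [' C1 ', ' C2 ', ' O1 '].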
def convert_to_coot_bond_type(rdkit_type):
out_type = 'single'
if (rdkit_type == Chem.rdchem.BondType.SINGLE):
out_type = 'single'
if (rdkit_type == Chem.rdchem.BondType.AROMATIC):
out_type = 'aromatic'
if (rdkit_type == Chem.rdchem.BondType.DOUBLE):
out_type = 'double'
if (rdkit_type == Chem.rdchem.BondType.TRIPLE):
out_type = 'triple'
if (rdkit_type == Chem.rdchem.BondType.ONEANDAHALF):
out_type = 'deloc'
return out_type
def pad_atom_name(name, element):
padded = name
if (len(element) == 1):
if (len(name) == 2):
padded = ' ' + name + ' '
if (len(name) == 3):
padded = ' ' + name
if (len(element) == 2):
if (len(name) == 2):
padded = name + ' '
if (len(name) == 3):
padded = name + ' '
return padded
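# Illustrative sketch (not from the original source) of the PDB-style
# padding produced by pad_atom_name():
#
#   pad_atom_name('C1',  'C')  -> ' C1 '
#   pad_atom_name('CA1', 'C')  -> ' CA1'
#   pad_atom_name('CL1', 'CL') -> 'CL1 '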
def is_smiles_file(file_name):
bits = file_name.rsplit(".")
if len(bits) > 1:
return bits[1] == 'smi'
else:
return False
def is_comp_id(comp_id):
return len(comp_id) == 3
def is_mdl_file(file_name):
bits = file_name.rsplit(".")
if (len(bits) < 2):
return False
else:
idx = len(bits) - 1
if (bits[idx] == 'mol'):
return True
else:
if (bits[idx] == 'mdl'):
return True
else:
return False
# return the contents of file_name
def read_file(file_name):
f = open(file_name)
return f.read()
# return False or a file_name
#
def get_pdbe_cif_for_comp_id(comp_id):
try:
file_name = "PDBe-" + comp_id + ".cif"
url = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/pdbechem/files/mmcif/' + comp_id + '.cif'
status = urllib.urlretrieve(url, file_name)
return file_name
except IOError as e:
print e
print "Failed: Can't ftp fr", url, "and write file", file_name
exit(2)
def make_restraints_for_bond_orders(mol):
restraints = {}
bond_list = []
for bond in mol.GetBonds():
        bond_type = bond.GetBondType()
        coot_bond_type = convert_to_coot_bond_type(bond_type)
at_1 = bond.GetBeginAtom()
at_2 = bond.GetEndAtom()
name_1 = at_1.GetProp('name')
name_2 = at_2.GetProp('name')
item = [name_1, name_2, coot_bond_type, 1.0, 1.0]
bond_list.append(item)
restraints['_chem_comp_bond'] = bond_list
restraints['_chem_comp'] = [mol.GetProp('comp_id'),
mol.GetProp('comp_id'),
mol.GetProp('name'),
'non-polymer',
mol.GetNumAtoms(),
mol.GetNumAtoms(),
'.']
return restraints
# return True if mogul is not run or mogul exe is in place.
# return False if mogul is expected but not found.
def test_for_mogul():
if run_mogul:
        mogul_exe = which('mogul')
        if mogul_exe is None:
print "mogul not found in path"
return False
else:
return True
else:
return True # OK, really
# this can throw a TypeError
#
def get_smiles_from_comp_id(comp_id):
global smiles_dict
if (not smiles_dict):
read_smiles_tab('smiles.tab')
return smiles_dict[comp_id]
# return a dictionary or False (if the file does not exist)
# (can this go inside get_smiles_from_comp_id?)
#
def read_smiles_tab(file_name):
global smiles_dict
try:
smiles_dict = {}
f = open(file_name)
lines = f.readlines()
for line in lines:
bits = line.rstrip().rsplit()
smiles_dict[bits[0]] = bits[2]
return True
except IOError as e:
smiles_dict = True # we've tested for it
return False
# return a pair, the smiles string and the molecule name (which might be blank)
#
def get_smiles_from_file(file_name):
if not os.path.exists(file_name):
return False,False
else:
f = open(file_name)
smi_line = f.readline()
parts = smi_line.split()
return parts[0], ' '.join(parts[1:])
def make_picture(mol, conf_id, comp_id, output_postfix):
output_file_name = comp_id + "-" + output_postfix + '.png'
make_picture_to_file(mol, conf_id, output_file_name)
def make_picture_to_file(mol, conf_id, output_file_name):
try:
from rdkit.Chem import Draw
import Image
state = Draw.MolToFile(mol, size=(300,300), fileName=output_file_name, confId=conf_id)
# print 'INFO:: wrote PNG "' + output_file_name + '"'
# img = Draw.MolToImage(mol, fitImage=True, size=(900,900))
# img2 = img.resize((300, 300), Image.ANTIALIAS)
# img2.save(output_file_name + "resampled.png")
except ImportError as e:
print 'ImportError:', e
except ValueError as e:
print 'ValueError in make_picture():', e
def make_restraints_from_smiles(smiles_string, comp_id, compound_name, mogul_dir, name_stub, pdb_out_file_name, mmcif_dict_name, quartet_planes, quartet_hydrogen_planes, use_mmff, match_atom_names_to_dict_flag, comp_id_list_for_names_match, dict_file_for_names_match):
if not test_for_mogul():
# return False
exit(1)
    m = Chem.MolFromSmiles(smiles_string)
    if m is None:
        print 'Failed to parse SMILES:', smiles_string
        exit(1)
    if compound_name:
        m.SetProp('_Name', compound_name)
return make_restraints(m, comp_id, mogul_dir, name_stub, pdb_out_file_name, mmcif_dict_name, quartet_planes, quartet_hydrogen_planes, use_mmff, match_atom_names_to_dict_flag, comp_id_list_for_names_match, dict_file_for_names_match)
# return the molecule and return value from make_restraints
#
def make_restraints_from_mdl(mol_file_name, comp_id, mogul_dir, name_stub, pdb_out_file_name, mmcif_dict_name, quartet_planes, quartet_hydrogen_planes, use_mmff, match_atom_names_to_dict_flag, comp_id_list_for_names_match, dict_files_for_names_match):
if (not (test_for_mogul())):
# return False, False
exit(1)
if not os.path.exists(mol_file_name):
print "No such file:", mol_file_name
exit(1)
compound_name = '.'
m = Chem.MolFromMolFile(mol_file_name)
return m, make_restraints(m, comp_id, mogul_dir, name_stub, pdb_out_file_name, mmcif_dict_name,
quartet_planes, quartet_hydrogen_planes, use_mmff,
match_atom_names_to_dict_flag, comp_id_list_for_names_match,
dict_files_for_names_match)
# return a list of (mol, comp_id) pairs for every ligand in the cif
# file. Often only one of course.
#
def make_restraints_from_mmcif_dict(cif_file_name_in, comp_id, mogul_dir,
output_dir, output_postfix,
quartet_planes, quartet_hydrogen_planes, use_mmff,
pdb_out_file_name, mmcif_restraints_out_file_name):
if not test_for_mogul():
return [(None, None)]
if comp_id == "TRY_ALL_COMP_IDS":
types = pysw.types_from_mmcif_dictionary(cif_file_name_in)
l = []
for r_type in types:
file_name_stub = r_type + "-" + output_postfix
if options.output_dir != ".":
file_name_stub = os.path.join(options.output_dir, file_name_stub)
pdb_out_file_name_local = file_name_stub + ".pdb"
mmcif_restraints_out_file_name_local = file_name_stub + ".cif"
#
t_mol = make_restraints_from_mmcif_dict_single(cif_file_name_in, r_type, mogul_dir,
output_postfix,
quartet_planes,
quartet_hydrogen_planes, use_mmff,
pdb_out_file_name_local,
mmcif_restraints_out_file_name_local)
l.append((t_mol, r_type))
return l
else:
# just the one
m = make_restraints_from_mmcif_dict_single(cif_file_name_in, comp_id, mogul_dir, output_postfix,
quartet_planes, quartet_hydrogen_planes, use_mmff,
pdb_out_file_name, mmcif_restraints_out_file_name)
return [(m, comp_id)]
# return a mol, given a sensible comp_id.
#
# Return None on failure
#
def make_restraints_from_mmcif_dict_single(cif_file_name_in, comp_id, mogul_dir, output_postfix,
quartet_planes, quartet_hydrogen_planes, use_mmff,
pdb_out_file_name, mmcif_restraints_out_file_name):
# print 'in make_restraints_from_mmcif_dict_single() comp_id is ', comp_id
# print 'in make_restraints_from_mmcif_dict_single() cif_file_name_in is ', cif_file_name_in
if not test_for_mogul():
        return None
mogul_file_name_stub = comp_id + '-' + output_postfix # file component of files within mogul_dir
m = pyrogen_boost.rdkit_mol_chem_comp_pdbx(cif_file_name_in, comp_id)
if False: # debugging
        for atom in m.GetAtoms():
            name = '?'  # keep the except clause below from hitting an unbound name
            try:
name = atom.GetProp('name')
chir = atom.GetProp('_CIPCode')
print ' atom', atom, 'name', name, 'chir', chir
except KeyError as e:
print 'pyrogen.py:: atom', atom, " with name ", name, ' has no _CIPCode property'
pass
# maybe user didn't select the correct comp_id for the given dictionary mmcif
if m.GetNumAtoms() == 0:
print 'No atoms for comp_id', comp_id
return False
else :
name = ''
try:
name = m.GetProp('_Name')
except KeyError:
print 'caught KeyError in make_restraints_from_mmcif_dict_single() trying GetProp _Name'
return make_restraints(m, comp_id, mogul_dir, mogul_file_name_stub,
pdb_out_file_name, mmcif_restraints_out_file_name,
quartet_planes, quartet_hydrogen_planes, use_mmff, False, False, False)
def n_hydrogens(mol):
n_H = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 1:
n_H += 1
return n_H
# return sane_H_mol
#
def make_restraints(m, comp_id, mogul_dir, mogul_file_name_stub, pdb_out_file_name, mmcif_dict_name,
quartet_planes, quartet_hydrogen_planes, use_mmff,
match_atom_names_to_dict_flag,
comp_id_list_for_names_match,
dict_files_for_names_match):
# test here (or in calling functions) if m is sane (i.e. is an rdkit molecule)
if not isinstance(m, Chem.rdchem.Mol):
print 'ERROR:: not a molecule'
return False
n_attempts = 20 * m.GetNumAtoms() # default is 10 * number of atoms.
# pH-dependent protonation or deprotonation
#
do_hydrogen_atoms_shift = True
try:
        compound_name = m.GetProp('_Name')
except KeyError:
# this happens all the time when we start from a SMILES, users don't need to see it.
# print 'caught key error in trying to get _Name in make_restraints() for m'
compound_name = '.'
except AttributeError as e:
# Do we need to see this? Perhaps make_restraints() needs to return a status.
# print 'AttributeError: problem with molecule in make_restraints()', e, ' on object:', m
return
m_H = m
if n_hydrogens(m) == 0:
m_H = AllChem.AddHs(m)
if do_hydrogen_atoms_shift:
# simple sane pH H-exchanges
sane_H_mol = pyrogen_boost.hydrogen_transformations(m_H)
# print >>file('sane_H.mol','w+'),Chem.MolToMolBlock(sane_H_mol)
else:
sane_H_mol = m_H
# This makes UFF types, which can fail sometimes.
conf_id = AllChem.EmbedMolecule(sane_H_mol, maxAttempts=n_attempts)
if use_mmff:
AllChem.MMFFOptimizeMolecule(sane_H_mol, confId=conf_id)
if False: # debugging output
ba = pyrogen_boost.mmff_bonds_and_angles(sane_H_mol) # uses _forcefield_ of the molecule
n_bonds = ba.bonds_size()
if n_bonds > 0:
for i_bond in range(n_bonds):
bond = ba.get_bond(i_bond)
print bond.get_idx_1(), bond.get_idx_2(), bond.get_type(), \
bond.get_resting_bond_length(), bond.get_sigma()
n_angles = ba.angles_size()
if n_angles > 0:
for i_angle in range(n_angles):
angle = ba.get_angle(i_angle)
print angle.get_idx_1(), angle.get_idx_2(), angle.get_idx_3(), \
angle.get_resting_angle(), angle.get_sigma()
else:
AllChem.UFFOptimizeMolecule(sane_H_mol, confId=conf_id)
# AllChem.UFFOptimizeMolecule(sane_H_mol)
atom_names = add_atom_names(sane_H_mol)
all_set = atom_types.set_atom_types(sane_H_mol) # has deloc bonds now, potentially
# debug sane_H_mol
if True:
molblock = Chem.MolToMolBlock(sane_H_mol)
print >> file("sane_H_mol.mol",'w'), molblock
if (all_set != True):
return False
else:
sane_H_mol.SetProp('comp_id', comp_id)
sane_H_mol.SetProp('name', compound_name)
sd_local = mogul_file_name_stub + ".sdf"
sdf_file_name = os.path.join(mogul_dir, mogul_file_name_stub + '-mogul.sdf')
mogul_ins_file_name = os.path.join(mogul_dir, mogul_file_name_stub + '-mogul.ins')
mogul_out_file_name = os.path.join(mogul_dir, mogul_file_name_stub + '-mogul.out')
Chem.AllChem.ComputeGasteigerCharges(sane_H_mol)
moguled_mol = pyrogen_boost.mogulify(sane_H_mol) # Nitro bond orders (and other things?)
    # the two branches below used to duplicate the same write; behaviour unchanged
    if not os.path.isdir(mogul_dir):
        checked_mkdir(mogul_dir)
    mb = Chem.MolToMolBlock(moguled_mol)
    print >> file(sdf_file_name,'w'), mb
bor = make_restraints_for_bond_orders(sane_H_mol)
# print out the set types:
print_atom_props = False
if print_atom_props:
print '--- Atom Props ---'
for atom in sane_H_mol.GetAtoms():
charge = atom.GetProp('_GasteigerCharge') # string?
name = atom.GetProp('name')
try:
atom_type = atom.GetProp('atom_type')
is_aromatic = atom.GetIsAromatic()
hybrid = atom.GetHybridization()
f_charge = float(charge)
if print_atom_props:
print " atom: %s %s type: %s arom: %s hybrid: %s charge: %6.3f" % (name, atom.GetSymbol(),
atom_type.ljust(4),
str(is_aromatic).ljust(5),
str(hybrid).rjust(3),
f_charge)
except KeyError:
print "miss", name, atom.GetSymbol(), charge
#
replace_with_mmff_b_a_restraints = False
if use_mmff:
replace_with_mmff_b_a_restraints = True
# execute_mogul() tests if mogul is executable
#
mogul_state = execute_mogul(sdf_file_name, mogul_ins_file_name, mogul_out_file_name)
if mogul_state:
# Here we need to think about matching to reference
# dictionary of amino acids (for standard atom names).
# That function takes a dictionary and a mmdb::Residue.
# How does that fit in here?
#
restraints = pysw.mogul_out_to_mmcif_dict_by_mol(mogul_out_file_name, comp_id,
compound_name, sane_H_mol, bor,
mmcif_dict_name, # not used
quartet_planes,
quartet_hydrogen_planes,
replace_with_mmff_b_a_restraints)
# match_atom_names_to_dict_flag, comp_id_list_for_names_match, dict_file_for_names_match
if match_atom_names_to_dict_flag:
restraints = atom_match_dictionary(restraints, sane_H_mol,
comp_id_list_for_names_match,
dict_files_for_names_match)
pysw.write_restraints(restraints, mmcif_dict_name)
pysw.regularize_and_write_pdb(sane_H_mol, restraints, comp_id, pdb_out_file_name)
else:
# mogul failed or was not in the path:
if run_mogul == False:
# ... but that's OK if we told pyrogen to run without mogul
# sane_H_mol:
# print >>file('debug_sane_H.mol','w+'),Chem.MolToMolBlock(sane_H_mol)
restraints = pysw.mmcif_dict_from_mol(comp_id, compound_name, sane_H_mol,
mmcif_dict_name,
quartet_planes, quartet_hydrogen_planes,
replace_with_mmff_b_a_restraints)
if restraints == None:
print "No restraints"
return True # hacked in value
if match_atom_names_to_dict_flag:
restraints = atom_match_dictionary(restraints, sane_H_mol,
comp_id_list_for_names_match,
dict_files_for_names_match)
pysw.write_restraints(restraints, mmcif_dict_name)
pysw.write_pdb_from_mol(sane_H_mol, comp_id, pdb_out_file_name)
else:
# ... but not if we wanted to use mogul.
# (We get here if there is a licence error for mogul)
exit(1)
return sane_H_mol
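# Illustrative sketch (not from the original source): a minimal mogul-free
# call of make_restraints(), assuming the global run_mogul has been set to
# False (all file names below are made up):
#
#   m = Chem.MolFromSmiles('c1ccccc1O')
#   m.SetProp('_Name', 'phenol')
#   make_restraints(m, 'LIG', 'pyrogen-mogul', 'LIG-pyrogen',
#                   'LIG-pyrogen.pdb', 'LIG-pyrogen.cif',
#                   False, False, True, False, False, False)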
def atom_match_dictionary(restraints, sane_H_mol, comp_id_list_for_names_match, dict_files_for_names_match):
template_comp_ids = ['CYS', 'ASP', 'GLU', 'HIS', 'ILE', 'LYS', 'LEU', 'MET',
'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL', 'TRP', 'TYR',
'G', 'C', 'GLC', 'MAN']
if isinstance(comp_id_list_for_names_match, basestring):
template_comp_ids = comp_id_list_for_names_match.split(',')
template_cif_dict_files_names = []
if isinstance(dict_files_for_names_match, basestring):
template_cif_dict_files_names = dict_files_for_names_match.split(',')
# don't use my set of comp_ids then
template_comp_ids = []
success,new_restraints,at_name_list = pysw.match_restraints_to_dictionaries(restraints,
template_comp_ids,
template_cif_dict_files_names)
if success:
n = len(sane_H_mol.GetAtoms())
if len(restraints['_chem_comp_atom']) == n:
restraints = new_restraints
for iat in range(n):
name = sane_H_mol.GetAtomWithIdx(iat).GetProp('name')
if name != restraints['_chem_comp_atom'][iat][0]:
# print " changing name from", name, "to", restraints['_chem_comp_atom'][iat][0]
                    sane_H_mol.GetAtomWithIdx(iat).SetProp('name', restraints['_chem_comp_atom'][iat][0])
return restraints
def score_and_print_tautomers(mol, comp_id, output_postfix, do_drawings):
results = tautomer.enumerate_tautomers(mol)
for i in range(len(results)):
m = results[i]
s = Chem.MolToSmiles(m)
print "comp_id :", comp_id, ": SMILES", s, 'score:', tautomer.tautomer_score(m)
if do_drawings:
file_name = comp_id + '-tautomer-' + str(i)
file_name += '-' + options.output_postfix + '.png'
n = m.GetNumConformers()
conf_id = 0
if n == 0:
conf_id = AllChem.Compute2DCoords(m)
conf = m.GetConformer(conf_id)
if conf.Is3D():
mol_for_drawing = Chem.RemoveHs(m, implicitOnly=False)
conf2D_id = AllChem.Compute2DCoords(mol_for_drawing)
make_picture_to_file(mol_for_drawing, conf2D_id, file_name)
else:
make_picture_to_file(m, -1, file_name)
if __name__ == "__main__":
def checked_mkdir(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
else:
if os.path.isdir(dirname):
pass # this happens most of the time, I imagine
else:
print 'Stop:: File', dirname, 'exists but is not a directory'
def smiles_and_name_from(smi_raw):
extension = os.path.splitext(smi_raw)[1]
smiles_string = ''
name=''
if extension == '.smi' or extension == '.smiles':
if not os.path.exists(smi_raw):
print "File not found:", smi_raw
exit(1)
else:
smiles_string,name = get_smiles_from_file(smi_raw)
else:
smiles_string = smi_raw
return smiles_string,name
parser = OptionParser(usage='pyrogen [options] file-or-SMILES'+
'\n if file-or-SMILES has extension ".smi" or ".smiles" ' +
'then it is treated as a file')
parser.add_option("-c", "--mmcif", dest="mmcif_file_name",
help="Make restraints from input mmcif FILE", metavar="FILE")
parser.add_option("-m", "--mol", dest="sdf_file",
help="Make restraints from input sdf/mol FILE", metavar="FILE")
parser.add_option("-r", "--residue-type", dest="comp_id", default='default',
help="Create restraints for this type. Default is LIG")
parser.add_option("-4", "--quartet-planes", dest="quartet_planes",
default=False,
help="Use 4-atom plane restraints,\n " +
"forces --quartet-hydrogens", action="store_true")
parser.add_option("-H", "--quartet-hydrogens", dest="quartet_hydrogen_planes",
default=False,
help="Use 4-atom hydrogen plane restraints",
action="store_true")
parser.add_option("-n", "--no-mogul", dest="use_mogul",
default=True, action="store_false",
help='Don\'t run CSD Mogul to update bond and angle restraints')
parser.add_option("-N", '--name', dest='compound_name', default=False,
help='Compound name')
parser.add_option('-S', '--smiles', dest="show_smiles",
default=False, action="store_true", help="Write the SMILES for the input molecule")
parser.add_option("-t", "--tautomers", dest="show_tautomers",
default=False, action="store_true",
help='Show SMILES for tautomers, don\'t generate restraints')
parser.add_option("-T", '--tmp-directory', dest='mogul_dir',
help='Directory into which the tmp files (e.g. for mogul) are written',
default='pyrogen-mogul')
parser.add_option("-d", '--directory', dest='output_dir',
help='Directory into which the output files (e.g. mmCIF and PDB) are written',
default='.')
parser.add_option('-o', '--output-postfix', default='pyrogen',
dest='output_postfix',
help='string to add to output file names, default is "pyrogen"')
parser.add_option('-p', '--picture', dest='drawing',
help='Additionally output a chemical diagram PNG',
action='store_true', default=False)
parser.add_option('-v', '--version', dest='show_version', default=False,
action='store_true', help='Print version information')
    parser.add_option('-M', '--MMFF', dest='use_mmff', default=False,
                      action='store_true', help='Use MMFF fallbacks for bonds and angles')
parser.add_option('-a', '--no-match-vs-reference-dictionaries', default=False,
action='store_true', dest='no_match_names_flag',
help="Don't match atom names vs. dictionary molecules (default False)")
parser.add_option('-R', '--reference-dictionary-files', dest='dict_files_for_names_match',
help='Try to match the atom names of the output molecule '+
'to this dictionary in these files (comma-separated list)', default=False)
parser.add_option('-C', '--reference-dictionary-comp-ids', dest='comp_id_list_for_names_match',
help='Try to match the atom names of the output molecule to these comp-ids' +
' (comma-separated list)',
default=False)
parser.add_option('-w', '--wwPDB', default=False, dest="wwPDB", action="store_true",
help='Fetch the wwPDB ligand definition and use that')
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="print less messages")
(options, args) = parser.parse_args()
# print 'DEBUG:: options:', options
if options.show_version:
print 'pyrogen-' + pyrogen_version, "revision", coot_svn_repo_revision.revision_number()
comp_id = options.comp_id
if options.comp_id == 'default':
comp_id = 'LIG'
if options.mmcif_file_name != None:
if options.comp_id == 'default':
comp_id = 'TRY_ALL_COMP_IDS'
file_name_stub = comp_id + '-' + options.output_postfix
if options.output_dir != ".":
file_name_stub = os.path.join(options.output_dir, file_name_stub)
pdb_out_file_name = file_name_stub + '.pdb'
mmcif_restraints_out_file_name = file_name_stub + '.cif'
# this is a bit ugly, perhaps. this value is inspected inside
# the following functions
#
if options.use_mogul == False:
run_mogul = False
if run_mogul:
if len(options.mogul_dir) > 0:
if options.mogul_dir[0] == '-':
print 'Stop:: you probably didn\'t mean that you wanted',options.mogul_dir, 'as your tmp directory.'
exit(1)
checked_mkdir(options.mogul_dir)
if options.show_tautomers or options.show_smiles:
# ------------------------ Tautomers and SMILES ---------------------------------------------
mol = False
if len(args) > 0:
smi_raw = args[0]
smiles,compound_name = smiles_and_name_from(smi_raw)
mol = Chem.MolFromSmiles(smiles)
else:
if options.sdf_file != None:
mol = Chem.MolFromMolFile(options.sdf_file)
else:
if options.mmcif_file_name != None:
types = pysw.types_from_mmcif_dictionary(options.mmcif_file_name)
print '-- tautomer mode: mmcif file types:', types
for type in types:
mol_local = pyrogen_boost.rdkit_mol_chem_comp_pdbx(options.mmcif_file_name, type)
score_and_print_tautomers(mol_local, type, options.output_postfix, options.drawing)
if mol:
if options.show_tautomers:
score_and_print_tautomers(mol, comp_id, options.output_postfix, options.drawing)
if options.show_smiles:
            s = Chem.MolToSmiles(mol)
print s
else:
# ------------------------ dict-build-mode ---------------------------------------------------
mmcif_file_name = options.mmcif_file_name
# shall we go get the dictionary?
if options.wwPDB:
mmcif_file_name = get_pdbe_cif_for_comp_id(comp_id)
if os.path.isfile(mmcif_file_name):
pass # good
else:
print "Missing downloaded file for comp-id:", comp_id
exit(2)
# JED mode for hydrogen planes
#
quartet_hydrogen_planes = options.quartet_hydrogen_planes
if options.quartet_planes:
quartet_hydrogen_planes = True
match_names_flag = True
if options.no_match_names_flag:
match_names_flag = False
if mmcif_file_name:
mol_pairs = make_restraints_from_mmcif_dict(mmcif_file_name,
comp_id,
options.mogul_dir,
options.output_dir,
options.output_postfix,
options.quartet_planes,
quartet_hydrogen_planes,
                                                    options.use_mmff,
pdb_out_file_name,
mmcif_restraints_out_file_name)
# this needs to be in a try block, I suppose, for example if the mmcif file
# does not exist.
for mol_info in mol_pairs:
(mol, comp_id) = mol_info
if not mol:
print 'No molecule'
else:
# Happy path
if options.drawing:
# make_picture() by default draws the first conformer in the given molecule.
# For mol, that is a 3D conformer. We want to draw a nice 2D diagram
#
mol_for_drawing = Chem.RemoveHs(mol, implicitOnly=False)
conf2D_id = AllChem.Compute2DCoords(mol_for_drawing)
make_picture(mol_for_drawing, conf2D_id, comp_id, options.output_postfix)
else:
if options.sdf_file != None:
(mol, results) = make_restraints_from_mdl(options.sdf_file, comp_id,
options.mogul_dir, file_name_stub,
pdb_out_file_name,
mmcif_restraints_out_file_name,
options.quartet_planes,
quartet_hydrogen_planes,
                                                      options.use_mmff,
match_names_flag,
options.comp_id_list_for_names_match,
options.dict_files_for_names_match)
if options.drawing:
make_picture(mol, -1, comp_id, options.output_postfix)
else:
if len(args) > 0:
smi_raw = args[0]
smiles,compound_name_from_file = smiles_and_name_from(smi_raw)
compound_name=False
if len(compound_name_from_file) > 0:
compound_name = compound_name_from_file
if isinstance(options.compound_name, basestring):
compound_name = options.compound_name
status = make_restraints_from_smiles(smiles, comp_id, compound_name,
options.mogul_dir, file_name_stub,
pdb_out_file_name,
mmcif_restraints_out_file_name,
options.quartet_planes,
quartet_hydrogen_planes,
                                                     options.use_mmff,
match_names_flag,
options.comp_id_list_for_names_match,
options.dict_files_for_names_match)
if options.drawing:
mol = Chem.MolFromSmiles(smiles)
make_picture(mol, -1, comp_id, options.output_postfix)
|
tectronics/coot
|
pyrogen/pyrogen.py
|
Python
|
gpl-3.0
| 32,488
|
[
"RDKit"
] |
c996e124e674d7965cadde0de14338c77e78ed332817da4dbe9e6c7f1bec6ae8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s
short_description: Manage Kubernetes (K8s) objects
version_added: "2.6"
author:
- "Chris Houseknecht (@chouseknecht)"
- "Fabian von Feilitzsch (@fabianvf)"
description:
- Use the OpenShift Python client to perform CRUD operations on K8s objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates or vault-encrypted files.
- Access to the full range of K8s APIs.
  - Use the M(k8s_facts) module to obtain a list of items about an object of type C(kind).
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
options:
merge_type:
description:
- Whether to override the default patch merge approach with a specific type. By default, the strategic
merge will typically be used.
- For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
want to use C(merge) if you see "strategic merge patch format is not supported"
- See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
- Requires openshift >= 0.6.2
- If more than one merge_type is given, the merge_types will be tried in order
- If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
is simply C(strategic-merge).
choices:
- json
- merge
- strategic-merge
type: list
version_added: "2.7"
wait:
description:
- Whether to wait for certain resource kinds to end up in the desired state. By default the module exits once Kubernetes has
received the request
- Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds.
- For resource kinds without an implementation, C(wait) returns immediately.
default: no
type: bool
version_added: "2.8"
wait_timeout:
description:
- How long in seconds to wait for the resource to end up in the desired state. Ignored if C(wait) is not set.
default: 120
version_added: "2.8"
validate:
description:
      - How (if at all) to validate the resource definition against the Kubernetes schema.
        Requires the kubernetes-validate Python module.
suboptions:
fail_on_error:
        description: Whether to fail on validation errors.
required: yes
type: bool
version:
        description: Version of Kubernetes to validate against. Defaults to the Kubernetes server version.
strict:
        description: Whether to fail when passing unexpected properties.
default: no
type: bool
version_added: "2.8"
append_hash:
description:
- Whether to append a hash to a resource name for immutability purposes
- Applies only to ConfigMap and Secret resources
- The parameter will be silently ignored for other resource kinds
- The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
the generated hash and append_hash=no)
type: bool
version_added: "2.8"
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a k8s namespace
k8s:
name: testing
api_version: v1
kind: Namespace
state: present
- name: Create a Service object from an inline definition
k8s:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
- name: Create a Service object by reading the definition from a file
k8s:
state: present
src: /testing/service.yml
- name: Remove an existing Service object
k8s:
state: absent
api_version: v1
kind: Service
namespace: testing
name: web
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
k8s:
state: present
src: /testing/deployment.yml
- name: >-
Read definition file from the Ansible controller file system.
If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
k8s:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') }}"
- name: fail on validation errors
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') }}"
validate:
fail_on_error: yes
- name: warn on validation errors, check for unexpected properties
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') }}"
validate:
fail_on_error: no
strict: yes
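# A sketch (not from the original examples): patching a resource kind for
# which the strategic merge is not supported, falling back to a JSON merge
- name: Apply a Custom Resource using a JSON merge patch
  k8s:
    state: present
    merge_type: merge
    definition: "{{ lookup('file', '/testing/custom-resource.yml') }}"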
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when multiple yaml documents are passed to src or resource_definition
returned: when resource_definition or src contains list of objects
type: list
duration:
description: elapsed time of task in seconds
returned: when C(wait) is true
type: int
sample: 48
'''
from ansible.module_utils.k8s.raw import KubernetesRawModule
def main():
KubernetesRawModule().execute_module()
if __name__ == '__main__':
main()
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/clustering/k8s/k8s.py
|
Python
|
gpl-3.0
| 7,383
|
[
"Galaxy"
] |
c5da5716a236ab9d279e0e35f5d385d0b6a5ed3ce19c4122292436d2207c8417
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
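A hypothetical response to that request might include the following
headers (the values are illustrative):
```
Total: 12
Per-Page: 5
Page: 2
Link: <https://api.vericred.com/networks?page=3&per_page=5>; rel="next"
```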
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s)
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size, since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
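A minimal sketch of resolving the sideloaded records client-side (assuming the
JSON above has already been parsed into a dict):
```
response = {
    "providers": [{"id": 1, "state_id": 1}, {"id": 2, "state_id": 1}],
    "states": [{"id": 1, "code": "NY"}],
}
# index the sideloaded states once, then resolve each provider's state by id
states_by_id = {state["id"]: state for state in response["states"]}
for provider in response["providers"]:
    print("provider %s -> %s" % (provider["id"],
                                 states_by_id[provider["state_id"]]["code"]))
```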
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
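As a sketch of consuming this format (using only the top-level separators
defined above: tiers join on ` / `, and a tier name is followed by `: `):
```
benefit = ("In-Network: $400 before deductible then 20% after deductible"
           " / Out-of-Network: $1,500 before deductible then 50% after deductible")
for tier in benefit.split(" / "):
    # tier ::= tier_name colon space tier_coverage
    name, _, coverage = tier.partition(": ")
    print("%s => %s" % (name, coverage))
```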
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.aca_plan2018_show_response import ACAPlan2018ShowResponse
class TestACAPlan2018ShowResponse(unittest.TestCase):
""" ACAPlan2018ShowResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testACAPlan2018ShowResponse(self):
"""
Test ACAPlan2018ShowResponse
"""
model = vericred_client.models.aca_plan2018_show_response.ACAPlan2018ShowResponse()
if __name__ == '__main__':
unittest.main()
|
vericred/vericred-python
|
test/test_aca_plan2018_show_response.py
|
Python
|
apache-2.0
| 10,097
|
[
"VisIt"
] |
74f538c39329b749acefe8461dfb3fa7e4f4047c9297befc4bdd3edc76540c9b
|
LICENSE_STRING = """
Snap Dragon: A Picture Sorting Program
Copyright (C) 2011 Isaac Muttschall
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version
3 of the License, or any later version.
This program is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public
License along with this program. If not, see
<http://www.gnu.org/licenses/>.
"""
# Snap Dragon
# Version 1.3.2
# Isaac Muttschall
# with Linux coding contributions from Brian M.
# with Mac coding contributions from Brad C.
# with operational feedback from Steph M, Shannon H, Brian M, Brad C, Brad W, Trisha D and many others.
"""
Previous Revision Changes:
See Previous_Revision_Changes.txt
Current Revision Changes:
Update News
Picture Design:
Design better welcome pic GIMP? PENDING
Design Find pics picture GIMP? PENDING
Find Pics:
Add ability to find pictures by tags PENDING
Make quick links multi level POSTPONED
Takes too long; needs a better algorithm
General:
Add skip to number in list feature FINISHED
Animate off feature FINISHED
Startup:
Fix Snap dragon link to point to snap dragon Page FINISHED
Bug Fixes
Fix time bug in find pics FINISHED
Fix bug with finding pictures taken by date FINISHED
Fix save with effect bug FINISHED
Fix remove feature FINISHED
Fix Tag Setting Buggyness PENDING
Future Revision Changes:
Find Pics:
Add Saving pics from findpics window PENDING
Add watching folders for picture changes PENDING
Address memory issues? PENDING
Add Loading screen on startup when loading settings and tags PENDING
Make Tags dictionary ( tags rework ) PENDING
Fix Installer PENDING
Add a tag displaying mode PENDING
Add a presentation mode PENDING
Add topbar with pictures of save albums to enable drag/drop picture PENDING
sorting for future compatibility with keyboardless interfaces
This requires a canvas to do
Add Cropping ability/crop mode PENDING
This requires changing to a canvas
Pre load pictures for smoother operation PENDING
Needs Threads to make any kind of difference
Make Mac/Linux App file PENDING
Add a toolbar for quick operations PENDING
Add Load/Save/ fix New Features PENDING
Add ability to make collages and save them PENDING
Add Facial Recognition capabilities PENDING
pyfaces is a viable option
Make custom widgets all over the program for a more professional PENDING
look
Add Fullscreen Ability PENDING
Look into Capabilities for raw pics PENDING
It appears raw pics are not readily readable using PIL. Perhaps
I can convert it to another image type just to display it, but
then copy the actual raw type when the user saves. Removed for
now
"""
#*************************************************************************************************#
# IMPORTS #
#*************************************************************************************************#
if( True ):
#*****************************************************************************#
# os Imports
#*****************************************************************************#
from os import getcwd, chdir, path, makedirs, listdir, mkdir
from os import remove, walk, system, access, R_OK
from FindPicsWindow import FindPicsWindow, GetInput
from TutorWindow import TutorWindow
from ChangeSavedWindow import ChangeSavedWindow
from TracebackErrorWindow import TracebackErrorWindow
from OptionWindow import OptionWindow
from HelpWindow import HelpWindow
from StartupWindow import StartupWindow
#*****************************************************************************#
# subprocess Imports
#*****************************************************************************#
from subprocess import Popen, call
#*****************************************************************************#
# sys & traceback Imports
#*****************************************************************************#
import sys
from sys import exit
from traceback import format_exc
#*****************************************************************************#
# Tkinter Imports
#*****************************************************************************#
from Tkinter import Tk, Toplevel
# Widgets
from Tkinter import Button, Checkbutton, Entry, Text, Frame, Label
from Tkinter import Listbox, Menu, Scrollbar, Text, Canvas, LabelFrame
# Constants
from Tkinter import END, LEFT, RIGHT, BOTTOM, TOP, BOTH, VERTICAL
from Tkinter import Y, X, N, S, E, W, NW
from Tkinter import DISABLED, NORMAL, ACTIVE, SUNKEN, MULTIPLE, EXTENDED
from Tkinter import StringVar, BooleanVar
from ttk import Combobox, Progressbar, Treeview
from tkFont import Font, BOLD
#*****************************************************************************#
# tkFileDialog Imports
#*****************************************************************************#
from tkFileDialog import askdirectory, asksaveasfilename, askopenfilename, Directory
#*****************************************************************************#
# tkMessageBox Imports
#*****************************************************************************#
from tkMessageBox import showinfo, askyesno, showerror, showwarning
#*****************************************************************************#
# Image, ImageTk, ImageFilter & ImageOps Imports
#*****************************************************************************#
from Image import open as PILopen # rename to distinguish from other open
from Image import ANTIALIAS, BICUBIC
from ImageTk import PhotoImage
from ImageFilter import BLUR, CONTOUR, DETAIL, EDGE_ENHANCE, EDGE_ENHANCE_MORE
from ImageFilter import EMBOSS, FIND_EDGES, SMOOTH, SMOOTH_MORE, SHARPEN
from ImageOps import grayscale, mirror, invert
from PIL import Image
from PIL.ExifTags import TAGS
#*****************************************************************************#
# copy & shutil Imports
#*****************************************************************************#
from copy import deepcopy
from shutil import copyfile, move, rmtree
#*****************************************************************************#
# datetime, time & types Imports
#*****************************************************************************#
from datetime import datetime
from datetime import date
from datetime import timedelta
from time import clock, sleep, strftime
from types import MethodType
from getpass import getuser
from random import randint
from xml.dom.minidom import Document
#*****************************************************************************#
# OS api toolkits Imports
# Support here for Windows, Mac, Linux os api commands
#*****************************************************************************#
try:
from win32api import GetSystemMetrics
WIDTH = GetSystemMetrics(0)
HEIGHT = GetSystemMetrics(1)
except ImportError:
# Mac import for screen size
try:
from AppKit import NSScreen
WIDTH = [screen.frame().size.width for screen in NSScreen.screens()][0]
HEIGHT = [screen.frame().size.height for screen in NSScreen.screens()][0]
except ImportError:
# general size import via xrandr; os.system() only returns the exit
# code, so popen() is used to capture the command's output instead
try:
from os import popen
wh = popen("xrandr | grep \* | cut -d' ' -f4").read().strip()
print "wh", wh
wh = wh.split( "x" )
WIDTH = int( wh[ 0 ] )
HEIGHT = int( wh[ 1 ] )
except Exception, e:
WIDTH = 400
HEIGHT = 300
#*************************************************************************************************#
# DEBUG #
#*************************************************************************************************#
debug = { "trace": False, "dump": False, "timing": False, "tlist": [] }
debug_file = None
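# debug flag meanings (see MainWindow.DebugTrace below): "trace" logs step
# names, "dump" writes a full attribute dump of the window object, and
# "timing" appends clock() samples to "tlist" and reports deltas between steps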
#*************************************************************************************************#
# CONSTANTS #
#*************************************************************************************************#
REV = "1.3.2"
PROG_DIR = getcwd()
SAVED_DIR = path.join( PROG_DIR , "Saved Sessions" )
LOCAL_DIR = path.join( PROG_DIR , "Local" )
ICON_DIR = path.join( PROG_DIR , "Icons" )
HELP_DIR = path.join( LOCAL_DIR, "Help Files" )
WELCOME_IM = path.join( ICON_DIR , "Welcome.bmp" )
USER = getuser()
DOC_AND_SET_DIR = "C:\\Documents and Settings\\" + USER + "\\"
FAR_LEFT = 1
FAR_RIGHT = 2
MIDDLE = 4
MAXIMUM_FONT_WEIGHT = 20
FIND_PICS = "find_pics"
# This makes the icon work in Linux too. path.join() never raises, so the
# old try/except fallback could never fire; pick the file by platform instead.
if( sys.platform.startswith( "win" ) ):
ICON_FILENAME = path.join( ICON_DIR , "Snap_Dragon.ico" )
else:
ICON_FILENAME = path.join( ICON_DIR , "Snap_Dragon.xbm" )
ICON_FILENAME = '@' + ICON_FILENAME
REM_FILENAME = path.join( LOCAL_DIR, "Settings.dat" )
DEBUG_FILENAME = path.join( LOCAL_DIR, "debug_notes.txt" )
START_FILENAME = path.join( LOCAL_DIR, "startup.dat" )
TAG_XML_NAME = "tags.xml"
TAG_XML_PATH = path.join( LOCAL_DIR, TAG_XML_NAME )
DEFAULTS = { "DEFAULT0": path.join( PROG_DIR, "Save 0" ),
"DEFAULT1": path.join( PROG_DIR, "Save 1" ),
"DEFAULT2": path.join( PROG_DIR, "Save 2" ),
"DEFAULT3": path.join( PROG_DIR, "Save 3" ),
"DEFAULT4": path.join( PROG_DIR, "Save 4" ),
"DEFAULT5": path.join( PROG_DIR, "Save 5" ),
"DEFAULT6": path.join( PROG_DIR, "Save 6" ),
"DEFAULT7": path.join( PROG_DIR, "Save 7" ),
"DEFAULT8": path.join( PROG_DIR, "Save 8" ),
"DEFAULT9": path.join( PROG_DIR, "Save 9" ),
}
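# the ten default save folders backing the digit-keyed sort slots
# (see MainWindow.SavePic and the Sort menu built in InitMenuBar)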
#*************************************************************************************************#
# CLASSES #
#*************************************************************************************************#
#******************************************************************************
class TagSelector( Toplevel ):
def __init__( self, curr_tags ):
Toplevel.__init__( self )
self.title( "Add tag to picture" )
self.iconbitmap( ICON_FILENAME )
self.geometry( "+100+50" )
self.ret_list = []
self.curr_tags = curr_tags
self.curr_tags.sort()
fr_top = Frame( self )
fr_left = Frame( fr_top )
self.tag_in = Entry( fr_left, width=20 )
self.tag_in.bind( "<Return>", self.AddTag )
self.tag_in.pack()
add_tag = Button( fr_left, text="Add Tag", width=15, command=self.AddTag )
add_tag.pack()
rem_tag = Button( fr_left, text="Remove Tag", width=15, command=self.RemoveTag )
rem_tag.pack()
fr_left.pack( side=LEFT )
fr_right = Frame( fr_top )
sb_right = Scrollbar( fr_right, orient=VERTICAL )
self.tag_box = Listbox( fr_right, width=20, height=20, yscrollcommand=sb_right.set )
for val in self.curr_tags:
self.tag_box.insert( END, val.lower() )
self.tag_box.bind( "<Key>", self.KeyPress )
sb_right.config( command=self.tag_box.yview )
sb_right.pack( side=RIGHT, fill=Y )
self.tag_box.pack()
fr_right.pack( side=LEFT )
fr_top.pack()
fr_bot = Frame( self )
Button( fr_bot, text="Ok", width=10, command=self.Apply ).pack( side=LEFT )
Button( fr_bot, text="Cancel", width=10, command=self.Cancel ).pack( side=LEFT )
fr_bot.pack()
self.tag_in.focus_force()
self.mainloop()
self.destroy()
def Apply( self ):
"""
Handle when the user presses the apply button
"""
if( len( self.tag_box.get( 0, END ) ) == 0 ):
self.ret_list = [ "no_tags" ]
else:
self.ret_list = self.tag_box.get( 0, END )
self.quit()
def Cancel( self ):
"""
Handle when the user presses the cancel button
"""
self.ret_list = self.curr_tags
self.quit()
def AddTag( self, *args ):
tag_in = self.tag_in.get().lower()
tags = self.tag_box.get( 0, END )
if( tag_in not in tags ):
add_list = []
add_list.append( tag_in )
for tag in tags:
add_list.append( tag.lower() )
self.tag_box.delete( 0, END )
add_list.sort()
for tag in add_list:
self.tag_box.insert( END, tag )
self.tag_in.delete( 0, END )
self.tag_in.focus_force()
else:
showwarning( "Tag Duplicate", "Already adding that tag." )
self.tag_in.delete( 0, END )
self.tag_in.focus_force()
def RemoveTag( self ):
add_list = []
self.tag_box.delete( ACTIVE )
for tag in self.tag_box.get( 0, END ):
add_list.append( tag )
self.tag_box.delete( 0, END )
add_list.sort()
for tag in add_list:
self.tag_box.insert( END, tag )
self.tag_box.select_set( ACTIVE )
def KeyPress( self, event ):
if( event.keysym == "Delete" ):
self.RemoveTag()
def ReturnVal( self ):
return( self.ret_list )
class BaseWindow( object ):
"""
Base window for all non-main windows
"""
non_main_root = None
def __init__( self, title="Snap Dragon", icon=ICON_FILENAME, geo="+200+200" ):
self.non_main_root = Toplevel()
self.non_main_root.title( title )
self.non_main_root.iconbitmap( icon )
self.non_main_root.geometry( geo )
class NewsWindow( BaseWindow ):
"""
Window for displaying information about new program features
"""
non_main_root = None
cb1 = None
canc = None
orig = None
opt = None
def __init__( self, current_options ):
"""
Initialize window settings
"""
self.canc = True
self.orig = current_options
self.opt = deepcopy( self.orig )
super( NewsWindow, self ).__init__( title="News", icon=ICON_FILENAME, geo='+200+200' )
features = [ "-A quick start window has been added #\n",
"-The Snap Dragon Banner had been redesigned\n",
"-The Find Pics Button has been redesigned\n",
"-Adding Tags to pictures has been added\n",
"-The Python Bouquet like now points to the Snap Dragon page\n",
"-Added the ability to skip to a certain picture number ( ctrl-g )\n",
"-fixed bug with finding date taken in find pics\n",
"-fixed bug with finding pictures by date in find pics\n",
"-fixed bug preventing saving pictures with effects\n",
"-fixed bug preventing pictures from being removed from current list\n",
"-fixed bug when re-entering tag additions\n",
]
fr1 = Frame( self.non_main_root )
fr2 = Frame( self.non_main_root )
fr3 = Frame( self.non_main_root )
Label( fr1, text="News:\nHere are some new things added in this edition of Snap Dragon" ).pack( side=LEFT )
fr1.pack()
s = Scrollbar( fr2 )
t = Text( fr2 )
t.focus_set()
t.pack( side=LEFT, fill=Y )
s.pack( side=LEFT, fill=Y )
s.config( command=t.yview )
t.config( yscrollcommand=s.set )
for i in range( len( features ) ):
t.insert( END, features[ i ] )
t.config( state=DISABLED )
fr2.pack()
cb1 = Checkbutton( fr3, text="Display on Startup?" )
cb1.bind( "<ButtonPress-1>", self.toggle_news )
if( self.opt[ "news_start" ] ):
cb1.select()
cb1.pack( side=LEFT )
Button( fr3, text="Cancel", command=self.cancel ).pack( side=LEFT )
Button( fr3, text="Apply", command=self.apply ).pack( side=LEFT )
fr3.pack()
self.non_main_root.mainloop()
self.non_main_root.destroy()
def apply( self ):
"""
Handle when the user presses the apply button
"""
self.canc = False
self.non_main_root.quit()
def cancel( self ):
"""
Handle when the user presses the cancel button
"""
self.opt = deepcopy( self.orig )
self.canc = True
print "quitting"
self.non_main_root.quit()
def toggle_news( self, event ):
if( self.opt[ "news_start" ] == 1 ):
self.opt[ "news_start" ] = 0
else:
self.opt[ "news_start" ] = 1
# For Testing
class TransparentWindow( Toplevel ):
def __init__( self, w, h, x, y ):
Toplevel.__init__( self )
if( x < 0 ):
x = 0
if( y < 0 ):
y = 0
print x, y
self.geometry( "%sx%s+%s+%s" % ( w+5, h+5, x, y+49 ) )
self.focus_force()
self.overrideredirect( True )
self.resizable( False, False )
#self.wm_attributes( "-topmost", True )
self.attributes( "-alpha", 0.6 )
self.bind( "<ButtonRelease-1>", self.B1Release )
bg = 'white'
self.config( bg=bg )
#self.Frame = Tk.Frame( self, bg=bg )
#self.Frame.pack()
''' Exits the application when the window is right clicked. '''
#self.Frame.bind('<Button-3>', self.exit)
''' Changes the window's size indirectly. '''
#self.Frame.configure(width=162, height=100)
self.mainloop()
def B1Release( self, event ):
self.destroy()
class MainWindow():
"""
class for the main program window
"""
root = None # Tkinter window handle
menubar = None # Tkinter menu handle
popup = None # Tkinter right click menu
im_id = 0 # Index for image list
im_list = [] # List to store information about images
last_width = WIDTH # Keep track of width changes
last_height = HEIGHT # Keep track of height changes
pic_dir = "" # The directory to get pics from
save_dir = ""
my_options = None # Dictionary for keeping track of options from options window
Saved = None # Tell if program has been saved since being changed
im = None # handle on current image
debug_file = None # handle on debug file
im_set = {} # settings for past images
project_name = ""
speed_test_str = None # string for holding speed test output
last_step = "START"
save_dirs = []
err = False # Used for tracking program errors; Tkinter sometimes handles them automatically
canv_im = None # Handle on canvas
canv_im_id = None # Handle on canvas image or current image
last_x = None # Last position in motion animation
animate_direction = LEFT # Direction of animation
animate_speed = 10 # steps to take to animate to target location
abort_animation = False # Flag for aborting animation
animating = False # Flag indication a picture is currently animating
animated = False # Flag indicating that the current picture has completed animation
animate_sleep = 0.025 # time to sleep in between animation steps
image1 = None
w = 0 # width of the window
h = 0 # height of the window
def __init__( self ):
"""
initialize default settings and window with tkinter widgets
"""
# open debug file
self.debug_file = open( DEBUG_FILENAME, "a" )
self.my_options = { "bmp" : 1,
"jpg" : 1,
"png" : 1,
#"tiff" : 1,
#"raw" : 1,
"gif" : 1,
"all_ext" : 1,
"TB_resize" : 100,
"CB_resize" : 1,
"speed_test" : 0,
"news_start" : 1,
"animations" : 1,
}
temp = DEFAULTS.keys()
temp.sort()
for thing in temp:
self.save_dirs.append( DEFAULTS[ thing ] )
self.root = Tk()
self.LoadSettings()
self.ReadStartup()
self.BindEvents()
self.InitMenuBar()
self.InitCanvas()
try:
self.root.state( 'zoomed' )
except:
self.root.wm_state( 'normal' )
self.root.title( "Snap Dragon " + REV )
self.root.iconbitmap( ICON_FILENAME )
self.root.update()
self.root.geometry( '%dx%d+%d+%d'%( WIDTH-100, HEIGHT-100, +50, +30 ) )
self.root.update()
self.MakeDirs()
#*****************************************************************************#
# Initialization #
#*****************************************************************************#
def BindEvents( self ):
"""
Initialize event bindings
"""
self.root.bind( "<Left>" , self.LeftButton )
self.root.bind( "<Right>" , self.RightButton )
self.root.bind( "<Control-Key>" , self.CTRLInput )
self.root.bind( "<Alt-Key>" , self.ALTInput )
self.root.bind( "<Shift-Key>" , self.ShiftInput )
self.root.bind( "<Control-Right>", self.CTRLRight )
self.root.bind( "<Control-Left>" , self.CTRLLeft )
self.root.bind( "<Key>" , self.HandleInput )
self.root.bind( "<Configure>" , self.WindowEvent )
self.root.bind( "<Button-3>" , self.PopupEvent )
self.root.bind( "<ButtonPress-1>", self.B1Press )
self.root.bind( "<B1-Motion>" , self.Motion )
self.root.bind( "<ButtonRelease-1>", self.B1Release )
self.root.protocol( "WM_DELETE_WINDOW", self.ExitGracefully )
def NewFindPics( self ):
findpicwin = FindPicsWindow( deepcopy( self.my_options ), deepcopy( self.im_set ), self.root.winfo_width(), self.root.winfo_height() )
if( not findpicwin.canc ):
self.im_list = deepcopy( findpicwin.im_list )
self.im_id = deepcopy( findpicwin.im_id )
self.root.quit()
def InitMenuBar( self ):
"""
initialize menubar and options
Note: Order is really important in the execution of this code
"""
self.menubar = Menu( self.root )
# Order is not important here but is kept for organization purposes
filemenu = Menu( self.menubar, tearoff=0 )
sortmenu = Menu( self.menubar, tearoff=0 )
savemenu = Menu( self.menubar, tearoff=0 )
seftmenu = Menu( self.menubar, tearoff=0 )
remvmenu = Menu( self.menubar, tearoff=0 )
openmenu = Menu( self.menubar, tearoff=0 )
editmenu = Menu( self.menubar, tearoff=0 )
trvlmenu = Menu( self.menubar, tearoff=0 )
efctmenu = Menu( self.menubar, tearoff=0 )
optmenu = Menu( self.menubar, tearoff=0 )
helpmenu = Menu( self.menubar, tearoff=0 )
self.popup = Menu( self.root , tearoff=0 )
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
fm = [ ( "New" , self.NotImp ),#lambda arg="%s" % ( path.join( LOCAL_DIR, "help.pdf" ) ): self.OSComm( arg ) ),
( "Open" , self.NotImp ),
( "Save As" , self.NotImp ),
( "Find Pics" , self.NewFindPics ),
( "SEP" , 0 ),
( "Exit" , self.ExitGracefully ),
]
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
# for menus with cascaded menus inside, the innermost lists must be defined
# first and then up levels until the top level is reached
tm = [ ( "Next" , self.GoRight ),
( "Previous" , self.GoLeft ),
( "SEP" , 0 ),
( "Rotate Left 90" , self.RotateLeft ),
( "Rotate Right 90" , self.RotateRight ),
]
efm= [ ( "Grayscale" , lambda str="gray" : self.ToggleFlag( str ) ),
( "Mirror" , lambda str="mirror" : self.ToggleFlag( str ) ),
( "Invert" , lambda str="invert" : self.ToggleFlag( str ) ),
( "Blur" , lambda str="blur" : self.ToggleFlag( str ) ),
( "Contour" , lambda str="contour" : self.ToggleFlag( str ) ),
( "detail" , lambda str="detail" : self.ToggleFlag( str ) ),
( "edge" , lambda str="edge" : self.ToggleFlag( str ) ),
( "edge+" , lambda str="edge+" : self.ToggleFlag( str ) ),
( "emboss" , lambda str="emboss" : self.ToggleFlag( str ) ),
( "find edges" , lambda str="find edges" : self.ToggleFlag( str ) ),
( "smooth" , lambda str="smooth" : self.ToggleFlag( str ) ),
( "smooth+" , lambda str="smooth+" : self.ToggleFlag( str ) ),
( "sharpen" , lambda str="sharpen" : self.ToggleFlag( str ) ),
( "Clear All" , self.ClearAllEffects ),
]
sm = [ ( "1" , lambda str="1" : self.SavePic( str ) ),
( "2" , lambda str="2" : self.SavePic( str ) ),
( "3" , lambda str="3" : self.SavePic( str ) ),
( "4" , lambda str="4" : self.SavePic( str ) ),
( "5" , lambda str="5" : self.SavePic( str ) ),
( "6" , lambda str="6" : self.SavePic( str ) ),
( "7" , lambda str="7" : self.SavePic( str ) ),
( "8" , lambda str="8" : self.SavePic( str ) ),
( "9" , lambda str="9" : self.SavePic( str ) ),
( "0" , lambda str="0" : self.SavePic( str ) ),
]
smeff = [ ( "1" , lambda str="1" : self.SaveEffectPic( str ) ),
( "2" , lambda str="2" : self.SaveEffectPic( str ) ),
( "3" , lambda str="3" : self.SaveEffectPic( str ) ),
( "4" , lambda str="4" : self.SaveEffectPic( str ) ),
( "5" , lambda str="5" : self.SaveEffectPic( str ) ),
( "6" , lambda str="6" : self.SaveEffectPic( str ) ),
( "7" , lambda str="7" : self.SaveEffectPic( str ) ),
( "8" , lambda str="8" : self.SaveEffectPic( str ) ),
( "9" , lambda str="9" : self.SaveEffectPic( str ) ),
( "0" , lambda str="0" : self.SaveEffectPic( str ) ),
]
smrmv = [ ( "1" , lambda str="1" : self.RemovePic( str ) ),
( "2" , lambda str="2" : self.RemovePic( str ) ),
( "3" , lambda str="3" : self.RemovePic( str ) ),
( "4" , lambda str="4" : self.RemovePic( str ) ),
( "5" , lambda str="5" : self.RemovePic( str ) ),
( "6" , lambda str="6" : self.RemovePic( str ) ),
( "7" , lambda str="7" : self.RemovePic( str ) ),
( "8" , lambda str="8" : self.RemovePic( str ) ),
( "9" , lambda str="9" : self.RemovePic( str ) ),
( "0" , lambda str="0" : self.RemovePic( str ) ),
]
om = [
( "1" , lambda num=1 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "2" , lambda num=2 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "3" , lambda num=3 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "4" , lambda num=4 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "5" , lambda num=5 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "6" , lambda num=6 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "7" , lambda num=7 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "8" , lambda num=8 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "9" , lambda num=9 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
( "0" , lambda num=0 : self.OpenPicFolder( self.save_dirs[ num ] ) ),
]
smc = [ ( "CASCADE" , "Save" , savemenu, sm ),
( "CASCADE" , "Save w/ Effect", seftmenu, smeff ),
( "CASCADE" , "Remove" , remvmenu, smrmv ),
( "CASCADE" , "Open" , openmenu, om ),
]
em = [ ( "CASCADE" , "Move" , trvlmenu, tm ),
( "CASCADE" , "Effects", efctmenu, efm ),
( "CASCADE" , "Sort" , sortmenu, smc ),
]
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
om = [ ( "Change Saved" , self.ChangeSaved ),
( "SEP" , 0 ),
( "Settings" , self.Options ),
]
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
hm = [ ( "About" , self.About ),
( "License" , self.License ),
( "News" , self.News ),
( "SEP" , 0 ),
( "Shortcuts" , self.Help ),
( "Help Topics" , self.Tutor ),
( "Raise div 0 Error", self.Raise0Error ),
]
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
menus = [ ( "File" , filemenu , fm, self.menubar ),
( "Edit" , editmenu , em, self.menubar ),
( "Options", optmenu , om, self.menubar ),
( "Help" , helpmenu , hm, self.menubar ),
]
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
#menu initialization
for j in range( len( menus ) ):
self.BuildMenu( menus[ j ][ 0 ], menus[ j ][ 1 ], menus[ j ][ 2 ] )
if( menus[ j ][ 3 ] != None ):
menus[ j ][ 3 ].add_cascade( label=menus[ j ][ 0 ] , menu=menus[ j ][ 1 ] )
self.root.config( menu=self.menubar )
# Initialize the popup menu to be the same as the edit menu
self.popup = editmenu
def InitCanvas( self ):
self.canv_im = Canvas( self.root, width=0, height=0 )
self.canv_im.pack()
def OSComm( self, str ):
system( "%s" % str )
def BuildMenu( self, name, m, d ):
for i in range( len( d ) ):
if( d[ i ][ 0 ] == "SEP" ):
m.add_separator()
elif( d[ i ][ 0 ] == "CASCADE" ):
self.BuildMenu( d[ i ][ 1 ], d[ i ][ 2 ], d[ i ][ 3 ] )
m.add_cascade( label=d[ i ][ 1 ], menu=d[ i ][ 2 ] )
else:
m.add_command( label=d[ i ][ 0 ], command=d[ i ][ 1 ] )
def MakeDirs( self ):
"""
Create directories for saving pictures
"""
#self.Backup()
# Create directories if they aren't created already
chdir( PROG_DIR )
if( not path.exists( "Saved Sessions" ) ):
makedirs( "Saved Sessions" )
if( not path.exists( "Toss" ) ):
makedirs( "Toss" )
#*****************************************************************************#
# Program Operation #
#*****************************************************************************#
def RunProgram( self ):
"""
Main program action happens here
"""
# Turn on the debug output file if user has selected
self.UpdateDebug()
start = clock()
# main loop for the program
if( len( self.im_list ) != 0 ):
resize_factor = int( self.my_options[ "TB_resize" ] ) / 100.0
if( self.im != None ):
del( self.im )
# determine height and width of the window
self.w = self.root.winfo_width()
self.h = self.root.winfo_height()
# Load picture for use in canvas
self.im = self.LoadPic( self.im_list[ self.im_id ], self.w, self.h )
if( self.im == None ):
return
#delete previous pictures to keep memory free
if( self.canv_im_id != None ):
self.canv_im.delete( self.canv_im_id )
# change width and height of the canvas depending on the window size
self.canv_im.config( height=self.h, width=self.w )
# Add image to canvas. NOTE: if animations are on, pictures are placed outside the visible range and then animated in
self.canv_im_id = self.canv_im.create_image( ( self.GetAnimationOffset( self.w ), self.h/2 ), image=self.im )
# if the picture has not been animated yet, animate it. If this isn't here a picture
# will be re-animated during every event. Otherwise set the picture to the middle of the window
if( not self.animated and self.my_options[ "animations" ] ):
self.AnimatePicture( self.canv_im_id, self.w/2 )
else:
self.canv_im.coords( self.canv_im_id, ( self.w/2, self.h/2 ) )
# change window title to picture name and saved status
working_title = "%s" % ( str( self.im_id + 1 ).zfill( len( str( len( self.im_list ) ) ) ) )
working_title += "/%s " % ( str( len( self.im_list ) ) )
working_title += self.im_list[ self.im_id ].filename
for place in self.im_list[ self.im_id ].saved:
working_title += " - %s" % place
self.root.title( working_title )
#self.canv_im.move( self.canv_im_id, 1, 0 )
#self.canv_im.update_idletasks()
end = clock()
print "mainloop runtime: ", end - start
self.root.mainloop()
def ExitGracefully( self ):
"""
Exit the program gracefully
"""
if( askyesno( "Exit?", "Are you sure you want to quit?" ) ):
self.debug_file.close()
self.root.destroy()
exit( 0 )
def ToggleFlag( self, str ):
"""
Toggle a flag in the image list
"""
if( len( self.im_list ) > 0 ):
if( hasattr( self.im_list[ self.im_id ], str ) and getattr( self.im_list[ self.im_id ], str ) == 0 ):
setattr( self.im_list[ self.im_id ], str, 1 )
else:
setattr( self.im_list[ self.im_id ], str, 0 )
self.root.quit()
#*****************************************************************************#
# Button Functions #
#*****************************************************************************#
def RememberSettings( self ):
"""
Remembers all the settings of all the pictures in the picture list
"""
self.im_set[ self.im_list[ self.im_id ].filename ] = self.im_list[ self.im_id ].SettingsOut()
self.SaveSettings()
def SaveSettings( self ):
"""
Saves the settings of pictures in the picture list to a file to remember later
This is based on the name of the picture
"""
chdir( PROG_DIR )
fn = open( REM_FILENAME, "w" )
for item in self.im_set:
fn.write( "%s^" % item )
#print "*" * 50
#print self.im_set[ item ]
for thing in self.im_set[ item ]:
fn.write( "%s@%s^" % ( thing, self.im_set[ item ][ thing ] ) )
fn.write( "\n" )
def ForgetSettings( self ):
"""
Forget the settings on one picture
"""
#if( askyesno( "Forget Settings", "Are you sure you want to forget the settings on this picture?" ) ):
if( self.im_list[ self.im_id ].filename in self.im_set ):
del self.im_set[ self.im_list[ self.im_id ].filename ]
self.SaveSettings()
def LoadSettings( self ):
"""
Load the settings from the data file
"""
if( path.exists( REM_FILENAME ) ):
chdir( PROG_DIR )
i = 0
fn = open( REM_FILENAME, "r" )
data_str = fn.readlines()
for line in data_str:
nlist = line.split( "^" )
self.im_set[ nlist[ 0 ] ] = {}
for item in nlist:
nlist2 = item.split( "@" )
if( len( nlist2 ) == 2 ):
if( nlist2[ 1 ].isdigit() ):
self.im_set[ nlist[ 0 ] ][ nlist2[ 0 ] ] = int( nlist2[ 1 ] )
else:
self.im_set[ nlist[ 0 ] ][ nlist2[ 0 ] ] = nlist2[ 1 ]
#*****************************************************************************#
# Picture Operation #
#*****************************************************************************#
def LoadPic( self, pic_dict, w, h ):
resize_factor = int( self.my_options[ "TB_resize" ] ) / 100.0
#if( self.image1 is not None ):
# del self.image1
# open image in list
self.image1 = PILopen( path.join( pic_dict.path, pic_dict.filename ) )
try:
if( pic_dict.rotate == 90 or pic_dict.rotate == 270 ):
self.image1.thumbnail( ( int( h*resize_factor ) , int( w*resize_factor ) ) )
else:
self.image1.thumbnail( ( int( w*resize_factor ) ,int( h*resize_factor ) ) )
except IOError:
if( askyesno( "Resize Error", "The Image %s Cannot be resized and may not display properly. Remove it from the list?" % ( pic_dict.filename ) ) ):
self.im_list.remove( self.im_list[ self.im_id ] )
return( None )
self.image1 = self.DoPhotoOps( self.image1, pic_dict )
# create label for image and place it
return( PhotoImage( self.image1 ) )
def GoLeft( self ):
"""
go backwards in the list of images to display.
NOTE: images are displayed in RunProgram. This just sets up the index
"""
if( self.im_id > 0 ):
self.im_id -= 1
else:
if( askyesno( "Wrap-Around", "You have reached the beginning of the pictures. Would you like to wrap around to the last picture?" ) ):
self.im_id = len( self.im_list ) - 1
if( self.my_options[ "animations" ] ):
# set animation variables to animate new picture
self.animate_direction = RIGHT
self.animated = False
if( self.animating ):
self.abort_animation = True
self.root.quit()
def GoRight( self ):
"""
go forwards in the list of images to display.
NOTE: images are displayed in RunProgram. This just sets up the index
"""
if( self.im_id < len( self.im_list ) - 1 ):
self.im_id += 1
else:
if( askyesno( "Wrap-Around", "You have reached the end of the pictures. Would you like to wrap around to the first picture?" ) ):
self.im_id = 0
if( self.my_options[ "animations" ] ):
# set animation variables to animate new picture
self.animate_direction = LEFT
self.animated = False
if( self.animating ):
self.abort_animation = True
self.root.quit()
def RotateRight( self ):
"""
Rotate the picture 90 degrees to the right.
NOTE: images are displayed in RunProgram. This just sets up the options
"""
if( len( self.im_list ) != 0 ):
self.im_list[ self.im_id ].rotate -= 90
self.Saved = False
if( self.im_list[ self.im_id ].rotate < 0 ):
self.im_list[ self.im_id ].rotate = 270
self.root.quit()
def RotateLeft( self ):
"""
Rotate the picture 90 degrees to the left.
NOTE: images are displayed in RunProgram. This just sets up the options
"""
if( len( self.im_list ) != 0 ):
self.im_list[ self.im_id ].rotate += 90
self.Saved = False
if( self.im_list[ self.im_id ].rotate >= 360 ):
self.im_list[ self.im_id ].rotate = 0
self.root.quit()
def DoPhotoOps( self, im, pic_d ):
"""
Handle photo manipulations on the pictures
"""
if( pic_d.rotate != 0 ):
print pic_d.rotate
im = im.rotate( pic_d.rotate )
if( pic_d.gray ):
im = grayscale( im )
if( pic_d.mirror ):
im = mirror( im )
if( pic_d.invert ):
im = invert( im )
if( pic_d.blur ):
im = im.filter( BLUR )
if( pic_d.contour ):
im = im.filter( CONTOUR )
if( pic_d.detail ):
im = im.filter( DETAIL )
if( pic_d.edge ):
im = im.filter( EDGE_ENHANCE )
if( pic_d.edgeplus ):
im = im.filter( EDGE_ENHANCE_MORE )
if( pic_d.emboss ):
im = im.filter( EMBOSS )
if( pic_d.findedges ):
im = im.filter( FIND_EDGES )
if( pic_d.smooth ):
im = im.filter( SMOOTH )
if( pic_d.smoothplus ):
im = im.filter( SMOOTH_MORE )
if( pic_d.sharpen ):
im = im.filter( SHARPEN )
self.CheckForSettings()
return( im )
def CheckForSettings( self ):
"""
Automatically handle remembering and forgetting settings
when options change
"""
exceptions = [ "filename", "path", "saved", "type" ]
for setting in self.im_list[ self.im_id ].options:
if( setting in exceptions ):
continue
else:
if( hasattr( self.im_list[ self.im_id ], setting ) and getattr( self.im_list[ self.im_id ], setting ) ):
self.RememberSettings()
return
self.ForgetSettings()
def ClearAllEffects( self ):
"""
Clear all effects for current picture
"""
exceptions = [ "filename", "path", "saved", "type" ]
for setting in self.im_list[ self.im_id ].options:
if( setting in exceptions ):
continue
else:
setattr( self.im_list[ self.im_id ], setting, 0 )
self.root.quit()
def TossPicture( self ):
"""
remove picture from viewing list
"""
if( len( self.im_list ) != 0 ):
if( askyesno( "Toss?", "Are you sure you want to remove this picture from the viewing list? This will NOT delete the picture from the hard drive." ) ):
self.im_list.remove( self.im_list[ self.im_id ] )
self.GoRight()
def GetAnimationOffset( self, w ):
"""
get the offset for a picture when it initially loads. It is
either out of sight to the right or left depending on the
direction the user is moving through the list
"""
if( self.animate_direction == LEFT and self.my_options[ "animations" ] ):
return( 1.5 * w )
elif( self.animate_direction == RIGHT and self.my_options[ "animations" ] ):
return( -1 * ( w / 2 ) )
else:
return( w / 2 )
def AnimatePicture( self, pic_id, w ):
"""
animate the picture pic_id toward the offset w
"""
# Tell other parts of the program a picture is currently being animated
self.animating = True
# determine the distance to travel to the offset w
trav_dist = w - self.canv_im.coords( self.canv_im_id )[ 0 ]
# determine the step length to take by dividing by the set animation speed
ani_step = trav_dist/self.animate_speed
# Move the picture animate_speed steps toward the target w offset, sleeping
# in between so the user sees the effect. This gives the effect
# that the farther the picture is from the target offset, the faster it
# moves toward it
for i in range( self.animate_speed ):
self.canv_im.move( pic_id, ani_step, 0 )
self.canv_im.update_idletasks()
sleep( self.animate_sleep )
# Set the picture to the target offset after animation
self.canv_im.coords( self.canv_im_id, ( w, self.h/2 ) )
# Tell other parts of the program it's done animating
self.animating = False
# Tell the program the current picture has animated all the way
self.animated = True
def AnimateOff( self, left=False, right=False ):
"""
Animate the picture off the screen to the right or left
"""
# Get the current x position of the picture
coord = self.canv_im.coords( self.canv_im_id )[ 0 ]
# If the picture is to the left of center, animate it left
# otherwise animate it right
if( coord < self.w/2 or left ):
self.AnimatePicture( self.canv_im_id, self.w*-1.5 )
elif( coord > self.w/2 or right ):
self.AnimatePicture( self.canv_im_id, self.w*1.5 )
def B1Press( self, event ):
"""
Handle left click event
"""
if( len( self.im_list ) != 0 ):
self.last_x = event.x
self.orig_y = event.y
self.do_y = True
def Motion( self, event ):
"""
Make the picture move on a mouse click and drag event
"""
if( len( self.im_list ) != 0 ):
if( self.my_options[ "animations" ] and self.do_y ):
y_diff = self.orig_y - event.y
if( y_diff > 250 ):
self.do_y = False
#self.tw = TransparentWindow( self.root.winfo_width(), self.root.winfo_height(), self.root.winfo_x(), self.root.winfo_y(), )
if( self.my_options[ "animations" ] ):
ani_step = event.x - self.last_x
self.canv_im.move( self.canv_im_id, ani_step , 0 )
self.canv_im.update_idletasks()
self.last_x = event.x
def B1Release( self, event ):
"""
return the picture to its rightful location after mouse button released
"""
# if the picture is dragged to within a 1/4 of the window width of either
# edge, animate it off the screen and go to the next picture;
# otherwise animate it back to the center
if( len( self.im_list ) != 0 ):
if( self.my_options[ "animations" ] ):
try:
self.tw.destroy()
except Exception, e:
pass
coord = self.canv_im.coords( self.canv_im_id )[ 0 ]
if( abs( coord - self.w/2 ) >= self.w/4 ):
self.AnimateOff()
if( coord < self.w/2 ):
self.GoRight()
else:
self.GoLeft()
else:
self.AnimatePicture( self.canv_im_id, self.w/2 )
self.root.quit()
def AddTag( self ):
tags = TagSelector( self.im_list[ self.im_id ].tags ).ReturnVal()
self.im_list[ self.im_id ].tags = tags
self.im_list[ self.im_id ].SetTags()
def SkipToPic( self ):
val = GetInput( "Skip to Picture",
"Skip to which picture?\nCurrently at %s/%s" % ( self.im_id, len( self.im_list ) ) ).ReturnVal()
val = int( val ) - 1
if( val >= len( self.im_list ) or ( val < 0 ) ):
showwarning( "Incorrect Value",
"The value you entered is not within the current range of pictures.\n"
"Acceptable values are 1 - %s" % ( str( len( self.im_list ) ) ) )
elif( val == self.im_id ):
showwarning( "Already There",
"You are already at picture number %s..." % ( val ) )
else:
if( val > self.im_id ):
self.AnimateOff( left=True )
self.animate_direction = LEFT
else:
self.AnimateOff( right=True )
self.animate_direction = RIGHT
self.im_id = val
self.animated = False
self.root.quit()
#*****************************************************************************#
# Event Handlers #
#*****************************************************************************#
def LeftButton( self, event ):
"""
Handle the left arrow key being pressed
"""
self.AnimateOff( right=True )
self.GoLeft()
def RightButton( self, event ):
"""
Handle Right arrow key being pressed
"""
self.AnimateOff( left=True )
self.GoRight()
def SavePic( self, char ):
SAVE = self.save_dirs[ int( char ) ]
if not path.exists( SAVE ):
mkdir( SAVE )
copyfile( path.join( self.im_list[ self.im_id ].path, self.im_list[ self.im_id ].filename ), path.join( SAVE, self.im_list[ self.im_id ].filename ) )
if( self.im_list[ self.im_id ].saved[ 0 ] != "not saved" ):
if( not( char in self.im_list[ self.im_id ].saved ) ):
self.im_list[ self.im_id ].saved.append( char )
else:
self.im_list[ self.im_id ].saved[ 0 ] = char
self.Saved = False
self.root.quit()
def SaveEffectPic( self, char ):
SAVE = self.save_dirs[ int( char ) ]
if not path.exists( SAVE ):
mkdir( SAVE )
self.image1.save( path.join( SAVE, "Effect_" + self.im_list[ self.im_id ].filename ) , "JPEG" )
self.root.quit()
def RemovePic( self, num ):
temp_path = path.join( PROG_DIR, "Save %s" % num )
if( num in self.im_list[ self.im_id ].saved and path.exists( path.join( temp_path, self.im_list[ self.im_id ].filename ) ) ):
remove( path.join( temp_path, self.im_list[ self.im_id ].filename ) )
self.im_list[ self.im_id ].saved.remove( num )
if( len( self.im_list[ self.im_id ].saved ) == 0 ):
self.im_list[ self.im_id ].saved = [ "not saved" ]
def OpenPicFolder( self, path ):
print path
try:
call( [ "explorer", path ] ) # windows
except Exception, e:
try:
call( [ "nautilus ", path ] ) # linux flavor
except Exception, e:
try:
call( [ "dolphin ", path ] ) # linux flavor
except Exception, e:
try:
call( [ "open ", path ] ) # mac
except Exception, e:
showinfo( "Open Folder", "Can't open " + path )
def HandleInput( self, event ):
"""
Handle input from the keyboard with out the CTRL key pressed
"""
if( len( self.im_list ) > 0 ):
if( event.char.isdigit() ):
self.SavePic( event.char )
elif( event.char == 'r' ):
self.RememberSettings()
self.root.quit()
elif( event.char == 'f' ):
self.ForgetSettings()
self.root.quit()
elif( event.char == 'g' ):
self.ToggleFlag( "gray" )
self.root.quit()
elif( event.char == 'm' ):
self.ToggleFlag( "mirror" )
self.root.quit()
elif( event.char == 'i' ):
self.ToggleFlag( "invert" )
self.root.quit()
elif( event.char == 'b' ):
self.ToggleFlag( "blur" )
self.root.quit()
elif( event.char == 'c' ):
self.ToggleFlag( "contour" )
self.root.quit()
elif( event.char == 'd' ):
self.ToggleFlag( "detail" )
self.root.quit()
elif( event.char == 'e' ):
self.ToggleFlag( "edge" )
self.root.quit()
elif( event.char == 'o' ):
self.ToggleFlag( "emboss" )
self.root.quit()
elif( event.char == 'h' ):
self.ToggleFlag( "find edges" )
self.root.quit()
elif( event.char == 's' ):
self.ToggleFlag( "smooth" )
self.root.quit()
elif( event.char == 'p' ):
self.ToggleFlag( "sharpen" )
self.root.quit()
elif( event.char == 't' ):
self.AddTag()
self.root.quit()
def ShiftInput( self, event ):
"""
Handle shift-key input
"""
print ord( event.char )
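# ASCII codes for the shifted digit keys; the index of the pressed character
# in this list gives the save-folder digit, e.g. 41 is ')' for Shift+0 and
# 33 is '!' for Shift+1 (the trailing 41 is redundant)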
shift_ord_key = [ 41, 33, 64, 35, 36, 37, 94, 38, 42, 40, 41 ]
# shift+E/S/C toggle edge+/smooth+/clear all; shift+digit saves the picture with effects applied
if( ord( event.char ) == 69 ):
self.ToggleFlag( "edge+" )
elif( ord( event.char ) == 83 ):
self.ToggleFlag( "smooth+" )
elif( ord( event.char ) == 67 ):
self.ClearAllEffects()
elif( ord( event.char ) in shift_ord_key ):
self.SaveEffectPic( str( shift_ord_key.index( ord( event.char ) ) ) )
chdir( PROG_DIR )
self.root.quit()
def ALTInput( self, event ):
"""
Handle alt-key input
"""
if( event.char ):
# if entry is alt-# remove the current picture from the save folder
if( len( self.im_list ) > 0 ):
if( ord( event.char ) <= 58 or ord( event.char ) >= 48 ):
num = str( ord( event.char ) - 48 )
self.RemovePic( num )
chdir( PROG_DIR )
self.root.quit()
def CTRLInput( self, event ):
"""
Handle input from the keyboard while the CTRL key is pressed
"""
if( event.char ):
print ord( event.char )
if( ord( event.char ) == 17 ): # ctrl-q
self.ExitGracefully()
if( ord( event.char ) == 20 ): # ctrl-t
self.TossPicture()
if( ord( event.char ) == 7 ): # ctrl-g
self.SkipToPic()
def CTRLRight( self, event ):
"""
Handle the right arrow key being press while CTRL is held down
"""
self.RotateRight()
def CTRLLeft( self, event ):
"""
Handle the left arrow key being press while CTRL is held down
"""
self.RotateLeft()
def WindowEvent( self, event ):
"""
Handle an event on the tkinter Main window. More specifically,
resize the picture accordingly if the window size changes
"""
if( str( event.widget ) == "."):
if( event.height != self.last_height or event.width != self.last_width ):
self.last_height = event.height
self.last_width = event.width
self.root.quit()
def PopupEvent( self, event ):
"""
Handle an event on mouse right click to bring up an options menu
"""
self.popup.tk_popup( event.x_root + 55, event.y_root + 10, 0 )
#*****************************************************************************#
# Window Handlers #
#*****************************************************************************#
def ChangeSaved( self ):
cw = ChangeSavedWindow( self.save_dirs )
if( not cw.canc ):
self.save_dirs = deepcopy( cw.dirs )
self.WriteStartup()
def Options( self ):
"""
Handle the menubar options being clicked.
Brings up options window and saves settings
"""
ow = OptionWindow( self.my_options )
if( not ow.canc ):
self.my_options = ow.opt
self.WriteStartup()
self.root.quit()
def News( self ):
"""
Display information about the program
"""
nw = NewsWindow( self.my_options )
if( not nw.canc ):
self.my_options = nw.opt
self.WriteStartup()
self.root.quit()
def Tutor( self ):
"""
Display information about the program
"""
tw = TutorWindow()
self.root.quit()
def Help( self ):
"""
Display information about the program
"""
hw = HelpWindow()
self.root.quit()
def WriteStartup( self ):
if( path.exists( START_FILENAME ) ):
remove( START_FILENAME )
f = open( START_FILENAME, "w" )
for item in self.my_options:
f.write( "%s#%s\n" % ( item, self.my_options[ item ] ) )
for num in range( len( self.save_dirs ) ):
f.write( "SAVE#%s#%s\n" % ( num, self.save_dirs[ num ] ) )
f.close()
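# startup.dat format, as written above and parsed by ReadStartup below:
#   option lines:   <option_name>#<int_value>
#   save-dir lines: SAVE#<slot_number>#<directory>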
def ReadStartup( self ):
start_file = open( START_FILENAME, "r" )
for line in start_file.readlines():
if( line == "\n" ):
return
temp = line.split( "#" )
if( "SAVE" in temp[ 0 ] ):
if( "DEFAULT" in temp[ 2 ] ):
temp[ 2 ] = temp[ 2 ][ : 8 ]
self.save_dirs[ int( temp[ 1 ] ) ] = DEFAULTS[ temp[ 2 ] ]
else:
self.save_dirs[ int( temp[ 1 ] ) ] = temp[ 2 ]
if( "\n" in self.save_dirs[ int( temp[ 1 ] ) ] ):
self.save_dirs[ int( temp[ 1 ] ) ] = self.save_dirs[ int( temp[ 1 ] ) ].replace( "\n", "" )
elif( not line == "" ):
self.my_options[ temp[ 0 ] ] = int( temp[ 1 ] )
start_file.close()
def About( self ):
"""
Display information about the program
"""
showinfo( "About Snap Dragon", "Snap Dragon version %s\nCreator: Isaac Muttschall\n" % REV )
def License( self ):
"""
Display information about the program
"""
LICENSE_STRING = """
Snap Dragon: A Picture Sorting Program
Copyright (C) 2011 Isaac Muttschall
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version
3 of the License, or any later version.
This program is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public
License along with this program. If not, see
<http://www.gnu.org/licenses/>.
"""
showinfo( "Snap Dragon License", LICENSE_STRING )
#*****************************************************************************#
# Program Maintenance #
#*****************************************************************************#
def NotImp( self ):
"""
display a message saying a feature has not been implemented yet
"""
showinfo( "Not Implemented", "This feature is not finished yet. Sorry..." )
def DebugTrace( self, str, ex="" ):
"""
Used for debugging only. Not in actual program implementation.
print trace and timing information during execution depending on the debug flags
"""
if( debug[ "trace" ] ):
mystr2 = "***\t\tDEBUG TRACE %s: %s\n" % ( str, ex )
print mystr2
self.debug_file.write( mystr2 )
if( debug[ "dump" ] ):
mystr3 = ""
#print "*" * 100
self.debug_file.write( "*" * 100 + "\n" )
for thing in dir( self ):
#print "class", getattr( self, thing ).__class__
if( not isinstance( getattr( self, thing ), MethodType ) ):
mystr3 += "%s: %s\n" % ( thing, getattr( self, thing ) )
#print mystr3
self.debug_file.write( mystr3 )
#print "*" * 100
self.debug_file.write( "*" * 100 + "\n" )
if( debug[ "timing" ] ):
debug[ "tlist" ].append( clock() )
mystr = "***\t\tDEBUG TIME SINCE %s %f\n\n" % ( self.last_step, debug[ "tlist" ][ len( debug[ "tlist" ] ) - 1 ] - debug[ "tlist" ][ len( debug[ "tlist" ] ) - 2 ] )
print mystr
self.debug_file.write( mystr )
self.last_step = str
def UpdateDebug( self ):
"""
Update the debug output information. A user can change this in the options menu
"""
global debug
if( self.my_options[ "speed_test" ] ):
debug[ "trace" ] = True
debug[ "timing" ] = True
else:
debug[ "trace" ] = False
debug[ "timing" ] = False
#*****************************************************************************#
# In Progress #
#*****************************************************************************#
def NewProject( self ):
filetypes = [ ( 'Snap Dragon File', '*.sdg' ),
( 'Any File', '*.*' ) ]
temp = asksaveasfilename( defaultextension="sdg", filetypes=filetypes, initialdir=SAVED_DIR )
if( temp != "" ):
sp = path.split(temp)
self.save_dir = path.join( *sp[:-1] )
self.project_name = sp[-1]
if( path.exists( self.save_dir ) ):
if( askyesno( "Clear Project?", "A project already exists wtih that name. Would you like to save over it?" ) ):
rmtree( self.save_dir )
mkdir( self.save_dir )
self.Backup()
def Backup( self ):
"""
Backup pictures in another directory for later use
"""
if( self.project_name != "" ):
if( askyesno( "Clear Folders?", "Do you want to clear all folders and start sorting from scratch? Current files will be saved to backup folder." ) ):
k = 1
while True:
backup = path.join("backup","%s ( %s )"%( str(datetime.now())[:10], str(k) ))
if not path.exists( backup ):
makedirs( backup )
break
k += 1
for i in range(10):
move("Save%s"%i, backup )
def Raise0Error( self ):
self.err = True
self.root.quit()
def SaveAs( self ):
filetypes = [ ( 'Snap Dragon File', '*.sdg' ),
( 'Any File', '*.*' ) ]
temp = asksaveasfilename( defaultextension="sdg", filetypes=filetypes, initialdir=path.join(SAVED_DIR,self.project_name) )
if( temp != "" ):
            self.save_dir, self.project_name = path.split( temp )
if( path.exists( self.save_dir ) ):
                if( askyesno( "Clear Project?", "A project already exists with that name. Would you like to save over it?" ) ):
rmtree( self.save_dir )
else:
return
mkdir( self.save_dir )
if( not path.exists( "Save 0" ) ):
                showerror( "Error", "Save folder integrity has been compromised. Shutting down." )
self.ExitGracefully()
            for i in range( 10 ):
move("Save %s" % i, self.save_dir )
self.PicToText()
            global SAVED
            SAVED = True
#def PicToText( self ):
# temp = ""
# f = open( path.join(self.save_dir, project_name), 'w' )
# for pic in self.im_list:
# f.write( Crypt( "@" ) )
# temp = ""
# for item in pic:
# temp = "%s,%s,%s" % ( temp, item, pic[ item ] )
# f.write( Crypt( temp ) )#"%s,%s" % ( item, pic[ item ] ) ) #Crypt( "%s,%s" % ( item, pic[ item ] ) ) )
# Currently Not Used
def File():
print "hello"
def ErrorWindow():
pop_win = Tk()
    pop_win.title( "Unknown Error" )  # title is a method; assigning to it would replace the method
def LoadSession():
filetypes = [ ( 'im_thing File', '*.imt' ),
( 'Any File', '*.*' ) ]
open_dir = askopenfilename( defaultextension="imt", filetypes=filetypes, initialdir=SAVED_DIR )
print open_dir[ len( open_dir )-4: ]
if( open_dir[ len( open_dir )-4: ] == ".imt" ):
        for i in range( 10 ):
rmtree( path.join( PROG_DIR, "Save%s"%i ) )
move( path.join(SAVED_DIR, open_dir, "Save%s"%i), PROG_DIR )
TextToPic( open_dir )
def NewSession():
    if( askyesno( "New Session?", "Do you want to clear all folders/pics and start sorting from scratch?") ):
        if( not SAVED ):
            if( askyesno( "Save Session?", "Do you want to save your current session first?") ):
SaveSession()
        for i in range( 10 ):
            rmtree( path.join( PROG_DIR, "Save %s" % i ) )
makedirs( "Save %s" % i )
im_list = []
im_id = 0
def Crypt( str ):
t = ""
for i in range( len( str ) ):
t = "%s%s" % ( t, chr( ord( str[ i ] ) ^ CRYPT_KEY ) )
return t
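# Illustrative note (not part of the original program): because XOR is its
# own inverse, applying Crypt twice with the same CRYPT_KEY recovers the
# original text, which is why one function serves for both encoding and
# decoding the save file. A sketch:
#
#     encoded = Crypt( "hello" )   # obfuscated text
#     decoded = Crypt( encoded )   # == "hello" again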
def TextToPic( fn ):
global im_list
global im_id
f = open( fn, 'r' )
im_list = []
im_id = 0
line = f.readlines()
for stuff in line:
for thing in stuff.split( Crypt( "@" ) ):
t = {}
temp = Crypt( thing ).split( ',' )
temp = temp[ 1: ]
if( len( temp ) == LEN_PIC ):
i = 0
while( i < len( temp ) ):
if( temp[ i + 1 ].isdigit() ):
t[ temp[ i ] ] = int( temp[ i + 1 ] )
else:
t[ temp[ i ] ] = temp[ i + 1 ]
i += 2
im_list.append( t )
# Currently Not Used
def ReadStartup( mw ):
start_file = open( START_FILENAME, "r" )
for line in start_file.readlines():
if( line == "\n" ):
return
temp = line.split( "#" )
if( "SAVE" in temp[ 0 ] ):
if( "DEFAULT" in temp[ 2 ] ):
temp[ 2 ] = temp[ 2 ][ : 8 ]
mw.save_dirs[ int( temp[ 1 ] ) ] = DEFAULTS[ temp[ 2 ] ]
else:
mw.save_dirs[ int( temp[ 1 ] ) ] = temp[ 2 ]
if( "\n" in mw.save_dirs[ int( temp[ 1 ] ) ] ):
mw.save_dirs[ int( temp[ 1 ] ) ] = mw.save_dirs[ int( temp[ 1 ] ) ].replace( "\n", "" )
elif( not line == "" ):
mw.my_options[ temp[ 0 ] ] = int( temp[ 1 ] )
start_file.close()
def DebugHeader():
"""
Form header to write to debug file
"""
d = ""
d += "#################################################\n"
d += "%s\n" % datetime.now().ctime()
d += "#################################################\n"
return( d )
def InitMain():
# Create Needed Files
# tags.xml
if( not path.exists( TAG_XML_PATH ) ):
f = open( TAG_XML_PATH, "w" )
doc = Document()
x = doc.createElement( "tags" )
for thing in dir( x ):
print thing
doc.appendChild( x )
f.write( doc.toprettyxml() )
f.close()
# startup.dat
if( not path.exists( START_FILENAME ) ):
f = open( START_FILENAME, "w" )
f.close()
# Settings.dat
if( not path.exists( REM_FILENAME ) ):
f = open( REM_FILENAME, "w" )
f.close()
def main():
"""
Main loop for program. Error catching happens here.
"""
InitMain()
if( debug[ "trace" ] or debug[ "timing" ] ):
if not path.exists( path.join( LOCAL_DIR, "Debug Archive" ) ):
makedirs( path.join( LOCAL_DIR, "Debug Archive" ) )
if path.isfile( DEBUG_FILENAME ):
move( DEBUG_FILENAME, path.join( LOCAL_DIR, "Debug Archive", "%s.txt"%str(datetime.now())[:18].replace(":",";") ) )
# open debug file
debug_file = open( DEBUG_FILENAME, "w" )
debug_file.write( DebugHeader() )
debug_file.close()
# initialize tkinter root things
mw = MainWindow()
nw = StartupWindow( mw, mw.my_options )
nw.pack()
mw.root.mainloop()
while( 1 ):
try:
if( mw.err ):
mw.err = False
                1/0 # deliberately raise an exception so the outer handler shows the error window
try:
                nw.pack_forget()
except:
pass
mw.RunProgram()
except Exception, e:
TracebackErrorWindow()
if (__name__ == "__main__"):
main()
|
imuttschall/Snap-Dragon
|
Snap_Dragon_1_3_2.py
|
Python
|
gpl-3.0
| 76,972
|
[
"Brian"
] |
83e6a1d9f9dbcec9fee2e3d07a17c4880816d8d7a898341c058e7dc8fb4675b6
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('nomadgram.users.urls', namespace='users')),
url(r'^images/', include('nomadgram.images.urls', namespace='images')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
bokjk/nomadgram
|
config/urls.py
|
Python
|
mit
| 1,625
|
[
"VisIt"
] |
defef6a6367e88b51be9f4316c6fa984bf6aed06c603505cb193026c0206b902
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import os
import pwd
import sys
from string import ascii_letters, digits
from six import string_types
from six.moves import configparser
from ansible.parsing.splitter import unquote
from ansible.errors import AnsibleOptionsError
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
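# A quick sketch of the resulting truth table (illustrative only):
#   mk_boolean('Yes') -> True     mk_boolean('0')  -> False
#   mk_boolean(1)     -> True     mk_boolean(None) -> False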
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
value = mk_boolean(value)
if value:
if integer:
value = int(value)
elif floating:
value = float(value)
elif islist:
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif isinstance(value, string_types):
value = unquote(value)
return value
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
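# Illustrative example (a sketch, not part of the original file): with an
# ansible.cfg containing
#
#     [defaults]
#     forks = 10
#
# and ANSIBLE_FORKS=20 set in the environment, the call
#
#     get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5, integer=True)
#
# returns 20: the environment variable wins over the config file, which in
# turn wins over the hard-coded default.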
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
path1 = os.getcwd() + "/ansible.cfg"
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
def shell_expand_path(path):
''' shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE '''
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
p, CONFIG_FILE = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# sections in config file
DEFAULTS='defaults'
DEPRECATED_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts'))
# generally configurable things
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST))
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None))
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su')
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '')
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied'} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas']
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# Plugin paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins')
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test_plugins:/usr/share/ansible_plugins/test_plugins')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
# galaxy related
DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
|
vfulco/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 16,716
|
[
"Galaxy"
] |
52f4e5c389fd49397a5fa763f4bbc759237dbd2e8fa301006abd0c22b7ad41cf
|
"""The base file related data source object from which all MayaVi data
sources derive.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import re
from os.path import split, join, isfile
from glob import glob
# Enthought library imports.
from traits.api import List, Str, Instance, Int, Range
from traitsui.api import Group, Item, FileEditor
from apptools.persistence.state_pickler import set_state
from apptools.persistence.file_path import FilePath
# Local imports
from mayavi.core.source import Source
from mayavi.core.common import handle_children_state
######################################################################
# Utility functions.
######################################################################
def get_file_list(file_name):
""" Given a file name, this function treats the file as a part of
a series of files based on the index of the file and tries to
determine the list of files in the series. The file name of a
file in a time series must be of the form 'some_name[0-9]*.ext'.
That is the integers at the end of the file determine what part of
the time series the file belongs to. The files are then sorted as
per this index."""
# The matching is done only for the basename of the file.
f_dir, f_base = split(file_name)
# Find the head and tail of the file pattern.
head = re.sub("[0-9]+[^0-9]*$", "", f_base)
tail = re.sub("^.*[0-9]+", "", f_base)
pattern = head+"[0-9]*"+tail
# Glob the files for the pattern.
_files = glob(join(f_dir, pattern))
# A simple function to get the index from the file.
def _get_index(f, head=head, tail=tail):
base = split(f)[1]
result = base.replace(head, '')
return float(result.replace(tail, ''))
# Before sorting make sure the files in the globbed series are
# really part of a timeseries. This can happen in cases like so:
# 5_2_1.vtk and 5_2_1s.vtk will be globbed but 5_2_1s.vtk is
# obviously not a valid time series file.
files = []
for x in _files:
try:
_get_index(x)
except ValueError:
pass
else:
files.append(x)
    # Sort the globbed files based on the index value.
    files.sort(key=_get_index)
return files
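# Illustrative example (not part of the original module): for a directory
# containing foo_1.vtk, foo_2.vtk and foo_10.vtk, get_file_list('foo_2.vtk')
# globs the pattern 'foo_[0-9]*.vtk' and returns the files sorted by their
# numeric index ([foo_1.vtk, foo_2.vtk, foo_10.vtk]), whereas a plain
# lexicographic sort would place foo_10.vtk second.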
######################################################################
# `FileDataSource` class.
######################################################################
class FileDataSource(Source):
# The version of this class. Used for persistence.
__version__ = 0
# The list of file names for the timeseries.
file_list = List(Str, desc='a list of files belonging to a time series')
# The current time step (starts with 0). This trait is a dummy
# and is dynamically changed when the `file_list` trait changes.
# This is done so the timestep bounds are linked to the number of
# the files in the file list.
timestep = Range(value=0,
low='_min_timestep',
high='_max_timestep',
enter_set=True, auto_set=False,
desc='the current time step')
base_file_name=Str('', desc="the base name of the file",
enter_set=True, auto_set=False,
editor=FileEditor())
# A timestep view group that may be included by subclasses.
time_step_group = Group(Item(name='file_path', style='readonly'),
Item(name='timestep',
defined_when='len(object.file_list) > 1')
)
##################################################
# Private traits.
##################################################
# The current file name. This is not meant to be touched by the
# user.
file_path = Instance(FilePath, (), desc='the current file name')
_min_timestep = Int(0)
_max_timestep = Int(0)
######################################################################
# `object` interface
######################################################################
def __get_pure_state__(self):
d = super(FileDataSource, self).__get_pure_state__()
# These are obtained dynamically, so don't pickle them.
for x in ['file_list', 'timestep']:
d.pop(x, None)
return d
def __set_pure_state__(self, state):
# Use the saved path to initialize the file_list and timestep.
fname = state.file_path.abs_pth
if not isfile(fname):
msg = 'Could not find file at %s\n'%fname
msg += 'Please move the file there and try again.'
raise IOError(msg)
self.initialize(fname)
# Now set the remaining state without touching the children.
set_state(self, state, ignore=['children', 'file_path'])
# Setup the children.
handle_children_state(self.children, state.children)
# Setup the children's state.
set_state(self, state, first=['children'], ignore=['*'])
######################################################################
# `FileDataSource` interface
######################################################################
def initialize(self, base_file_name):
"""Given a single filename which may or may not be part of a
time series, this initializes the list of files. This method
need not be called to initialize the data.
"""
self.base_file_name = base_file_name
######################################################################
# Non-public interface
######################################################################
def _file_list_changed(self, value):
# Change the range of the timestep suitably to reflect new list.
n_files = len(self.file_list)
timestep = min(self.timestep, n_files)
self._max_timestep = max(n_files -1, 0)
if self.timestep == timestep:
self._timestep_changed(timestep)
else:
self.timestep = timestep
def _file_list_items_changed(self, list_event):
self._file_list_changed(self.file_list)
def _timestep_changed(self, value):
file_list = self.file_list
if len(file_list) > 0:
self.file_path = FilePath(file_list[value])
else:
self.file_path = FilePath('')
def _base_file_name_changed(self,value):
self.file_list = get_file_list(value)
if len(self.file_list) == 0:
self.file_list = [value]
try:
self.timestep = self.file_list.index(value)
except ValueError:
self.timestep = 0
|
dmsurti/mayavi
|
mayavi/core/file_data_source.py
|
Python
|
bsd-3-clause
| 6,997
|
[
"Mayavi",
"VTK"
] |
f4ee601ee7400e368ea9763b7904aad38d313ffdfea1df006a97c5c703c3ad0f
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*************************************
**espresso.analysis.MeanSquareDispl**
*************************************
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.analysis.ConfigsParticleDecomp import *
from _espresso import analysis_MeanSquareDispl
class MeanSquareDisplLocal(ConfigsParticleDecompLocal, analysis_MeanSquareDispl):
    'The (local) mean square displacement computation.'
def __init__(self, system, chainlength = None):
if chainlength is None:
cxxinit(self, analysis_MeanSquareDispl, system)
else:
cxxinit(self, analysis_MeanSquareDispl, system, chainlength)
def computeG2(self):
return self.cxxclass.computeG2(self)
def computeG3(self):
return self.cxxclass.computeG3(self)
def strange(self):
print 1
return 1
if pmi.isController:
class MeanSquareDispl(ConfigsParticleDecomp):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.analysis.MeanSquareDisplLocal',
pmiproperty = [ 'print_progress' ],
            pmicall = ["computeG2", "computeG3", 'strange']
)
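# Usage sketch (an assumption based on the usual ConfigsParticleDecomp
# workflow, not part of the original module; integrator, n_snapshots and
# steps_per_snapshot are hypothetical names):
#
#     msd = espresso.analysis.MeanSquareDispl(system, chainlength=64)
#     for snapshot in range(n_snapshots):
#         integrator.run(steps_per_snapshot)
#         msd.gather()               # store the current configuration
#     g1 = msd.compute()             # computeG2()/computeG3() for g2, g3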
|
BackupTheBerlios/espressopp
|
src/analysis/MeanSquareDispl.py
|
Python
|
gpl-3.0
| 1,974
|
[
"ESPResSo"
] |
d6bb514fd6d8a1c08c9936e2eb9f7827240e28c9390ba7c9551e90fdc27de111
|
# coding: utf-8
"""
Evaluate the defect concentration based on composition, temperature,
and defect energies using the "Dilute Solution Model".
Reference: Phys Rev B, 63, 094103, 2001,
"Density of constitutional and thermal point defects in L12 Al3Sc",
C. Woodward, M. Asta, G. Kresse and J. Hafner.
Manual and citation for the code, DOI: 10.1016/j.cpc.2015.03.015
"""
from __future__ import unicode_literals
from __future__ import division
__author__ = 'Bharat Medasani'
__version__ = "0.2"
__maintainer__ = "Bharat Medasani"
__email__ = "mbkumar@gmail.com"
__status__ = "Alpha"
__date__ = "6/4/14"
import math
import copy
import numpy as np
from six.moves import zip
from monty.dev import requires
from monty.fractions import gcd
try:
from sympy import Symbol, nsolve, Integer, Float, Matrix, exp, solve, Eq
sympy_found = True
except ImportError:
sympy_found = False
# Physical constants
k_B = 8.6173324e-5  # Boltzmann constant in eV/K
# Check the inputs
def check_input(def_list):
flag = True
for defect in def_list:
if not defect:
flag = False
break
return flag
@requires(sympy_found,
"dilute_solution_model requires Sympy module. Please install it.")
def dilute_solution_model(structure, e0, vac_defs, antisite_defs, T,
trial_chem_pot = None, generate='plot'):
"""
Compute the defect densities using dilute solution model.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
T: Temperature in Kelvin
trial_chem_pot (optional): Trial chemical potentials to speedup
the plot generation. Format is {el1:mu1,...}
        generate (string): Options are 'plot' or 'energy'.
            Chemical potentials are also returned with the 'energy' option.
            If the 'energy' option is not chosen, plot data is generated.
Returns:
If generate=plot, the plot data is generated and returned in
HighCharts format.
If generate=energy, defect formation enthalpies and chemical
potentials are returned.
"""
if not check_input(vac_defs):
raise ValueError('Vacancy energy is not defined')
if not check_input(antisite_defs):
raise ValueError('Antisite energy is not defined')
formation_energies = {}
formation_energies['vacancies'] = copy.deepcopy(vac_defs)
formation_energies['antisites'] = copy.deepcopy(antisite_defs)
for vac in formation_energies['vacancies']:
del vac['energy']
for asite in formation_energies['antisites']:
del asite['energy']
# Setup the system
site_species = [vac_def['site_specie'] for vac_def in vac_defs]
multiplicity = [vac_def['site_multiplicity'] for vac_def in vac_defs]
m = len(set(site_species)) # distinct species
n = len(vac_defs) # inequivalent sites
# Reduce the system and associated parameters such that only distinctive
# atoms are retained
comm_div = gcd(*tuple(multiplicity))
multiplicity = [val/comm_div for val in multiplicity]
e0 = e0/comm_div
T = Float(T)
#c0 = np.diag(multiplicity)
c0 = np.diag(np.ones(n))
mu = [Symbol('mu'+i.__str__()) for i in range(m)]
# Generate maps for hashing
# Generate specie->mu map and use it for site->mu map
specie_order = [] # Contains hash for site->mu map Eg: [Al, Ni]
site_specie_set = set() # Eg: {Ni, Al}
for i in range(n):
site_specie = site_species[i]
if site_specie not in site_specie_set:
site_specie_set.add(site_specie)
specie_order.append(site_specie)
site_mu_map = [] # Eg: [mu0,mu0,mu0,mu1] where mu0->Al, and mu1->Ni
for i in range(n):
site_specie = site_species[i]
j = specie_order.index(site_specie)
site_mu_map.append(j)
specie_site_index_map = [] # Eg: [(0,3),(3,4)] for Al & Ni
for i in range(m):
low_ind = site_species.index(specie_order[i])
if i < m-1:
hgh_ind = site_species.index(specie_order[i+1])
else:
hgh_ind = n
specie_site_index_map.append((low_ind,hgh_ind))
"""
dC: delta concentration matrix:
dC[i,j,k]: Concentration change of atom i, due to presence of atom
j on lattice site k
Special case is [i,i,i] which is considered as vacancy
Few cases: dC[i,i,i] = -1 due to being vacancy special case
dC[k,k,i] = +1 due to increment in k at i lattice if i
lattice type is of different element
dC[i,k,i] = -1 due to decrement of ith type atom due to
presence of kth type atom on ith sublattice and kth type
atom specie is different from ith sublattice atom specie
dC[i,k,k] = 0 due to no effect on ith type atom
dC[i,j,k] = 0 if i!=j!=k
"""
dC = np.zeros((n,n,n), dtype=np.int)
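    # Worked example (illustrative, for a two-site binary A-B system with
    # site 0 = A and site 1 = B): dC[0,0,0] = -1 (a vacancy on the A site
    # removes one A atom), dC[1,1,0] = +1 (a B antisite on the A site adds
    # one B atom), and dC[0,1,0] = -1 (the same antisite displaces one A).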
for i in range(n):
for j in range(n):
for k in range(n):
if i == j and site_species[j] != site_species[k] and \
site_species[i] != site_species[k]:
dC[i,j,k] = 1
for j in range(n):
for k in range(n):
if i == k:
dC[i,j,k] = -1
for k in range(n):
for j in range(n):
for i in range(n):
if i != j:
if site_species[j] == site_species[k]:
dC[i,j,k] = 0
for ind_map in specie_site_index_map:
if ind_map[1]-ind_map[0] > 1:
for index1 in range(ind_map[0]+1,ind_map[1]):
for index2 in range(ind_map[0]):
for i in range(n):
dC[i,index1,index2] = 0
for index2 in range(ind_map[1],n):
for i in range(n):
dC[i,index1,index2] = 0
# dE matrix: Flip energies (or raw defect energies)
els = [vac_def['site_specie'] for vac_def in vac_defs]
dE = []
for i in range(n):
dE.append([])
for i in range(n):
for j in range(n):
dE[i].append(0)
for j in range(n):
for i in range(n):
if i == j:
dE[i][j] = vac_defs[i]['energy']
else:
sub_specie = vac_defs[i]['site_specie']
site_specie = vac_defs[j]['site_specie']
if site_specie == sub_specie:
dE[i][j] = 0
else:
for as_def in antisite_defs:
if int(as_def['site_index']) == j+1 and \
sub_specie == as_def['substitution_specie']:
dE[i][j] = as_def['energy']
break
dE = np.array(dE)
# Initialization for concentrations
# c(i,p) == presence of ith type atom on pth type site
    c = Matrix(n,n,[0]*n**2)
    for i in range(n):
        for p in range(n):
            c[i,p] = Integer(c0[i,p])
            site_flip_contribs = []
            for epi in range(n):
                sum_mu = sum([mu[site_mu_map[j]]*Integer(dC[j,epi,p]) \
                              for j in range(n)])
                flip = Integer(dC[i,epi,p]) * \
                       exp(-(dE[epi,p]-sum_mu)/(k_B*T))
                if flip not in site_flip_contribs:
                    site_flip_contribs.append(flip)
                c[i,p] += flip
total_c = []
for ind in specie_site_index_map:
val = 0
for i in range(*ind):
sum_i = sum([c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
total_c.append(val)
c_ratio = [total_c[-1]/total_c[i] for i in range(m)]
# Expression for Omega, the Grand Potential
omega1 = e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:])*multiplicity[i] \
for i in range(n)])
omega2 = []
fm_en_eff = []
used_dEs = []
for p_r in range(n):
for epi in range(n):
sum_mu = sum([mu[site_mu_map[j]]*dC[j,epi,p_r] \
for j in range(n)])
if p_r != epi and site_mu_map[p_r] == site_mu_map[epi]:
continue
if dE[epi,p_r] not in used_dEs:
omega2.append(k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu)/(k_B*T)))
fm_en_eff.append(dE[epi,p_r]-sum_mu)
used_dEs.append(dE[epi, p_r])
omega = omega1-sum(omega2)
# Compute composition range
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
comp1_min = sum(multiplicity[li:hi])/sum(multiplicity)*100-1
comp1_max = sum(multiplicity[li:hi])/sum(multiplicity)*100+1
delta = float(comp1_max-comp1_min)/120.0
yvals = []
for comp1 in np.arange(comp1_min,comp1_max+delta,delta):
comp2 = 100-comp1
y = comp2/comp1
yvals.append(y)
def reduce_mu():
omega = [e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:]) for i in range(n)])]
x = solve(omega)
return x
def compute_mus_by_search():
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
y_vect = [specie_concen[-1]/specie_concen[i] for i in range(m)]
vector_func = [y_vect[i]-c_ratio[i] for i in range(m-1)]
vector_func.append(omega)
min_diff = 1e10
mu_vals = None
c_val = None
m1_min = -20.0
if e0 > 0:
m1_max = 10 # Search space needs to be modified
else:
m1_max = 0
for m1 in np.arange(m1_min,m1_max,0.01):
m0 = mu_red[mu[0]].subs(mu[-1],m1)
try:
x = nsolve(vector_func,mu,[m0,m1],module="numpy")
except:
continue
c_val = c.subs(dict(zip(mu,x)))
#if all(x >= 0 for x in c_val):
specie_concen = []
for ind in specie_site_index_map:
specie_concen.append(sum([sum(c_val[i,:]) for i in range(*ind)]))
y_comp = [specie_concen[-1]/specie_concen[i] for i in range(m)]
diff = math.sqrt(sum([pow(abs(y_comp[i]-y_vect[i]),2) for i in range(m)]))
if diff < min_diff:
min_diff = diff
mu_vals = x
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
else:
raise ValueError()
return mu_vals
def compute_def_formation_energies():
i = 0
for vac_def in vac_defs:
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
uncor_energy = vac_def['energy']
formation_energy = uncor_energy + mu_vals[ind]
formation_energies['vacancies'][i]['formation_energy'] = formation_energy
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$V_{'+site_specie+'}$'
else:
label = '$V_{'+site_specie+'_'+str(cur_ind)+'}$'
formation_energies['vacancies'][i]['label'] = label
i += 1
i = 0
for as_def in antisite_defs:
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
uncor_energy = as_def['energy']
formation_energy = uncor_energy + mu_vals[ind1] - mu_vals[ind2]
formation_energies['antisites'][i]['formation_energy'] = formation_energy
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
formation_energies['antisites'][i]['label'] = label
i += 1
return formation_energies
# If generate option is energy compute effective formation energies
# at ideal stoichiometry and return the formation energies and chem pot.
if generate == 'energy':
if not trial_chem_pot:
mu_vals = compute_mus_by_search()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
                mu_vals = compute_mus_by_search()
formation_energies = compute_def_formation_energies()
mu_dict = dict(zip(specie_order,mu_vals))
return formation_energies, mu_dict
if not trial_chem_pot:
# Try computing mus by assuming one of the defects is dominant at 0.01
# concen. First vacancy is tried and then antisite
# Generate trial mus assuming vacancy as dominant defect
#for specie-0 at lower yval
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
li1 = specie_site_index_map[1][0]
hi1 = specie_site_index_map[1][1]
spec_mult = [sum(multiplicity[li:hi]), sum(multiplicity[li1:hi1])]
ln_def_conc = 4.60517
for i in range(li,hi):
vac_flip_en = vac_defs[i]['energy']
mu_vals = [ln_def_conc*k_B*T -vac_flip_en]
mu_vals.append((e0 - spec_mult[0]*mu_vals[0]) / spec_mult[1])
comp_ratio = yvals[0]
# Test if the trial mus are good
vector_func = [comp_ratio - c_ratio[0]]
vector_func.append(omega)
try:
mu_vals = nsolve(vector_func,mu,mu_vals)
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
except: # Go for antisite as dominant defect
mu_gs = [Symbol('mu_gs'+j.__str__()) for j in range(m)]
eqs = [mu_gs[0]-mu_gs[1] - (ln_def_conc*k_B*T-antisite_defs[i][
'energy'])]
eqs.append(spec_mult[0]*mu_gs[0] + spec_mult[1]*mu_gs[1] - e0)
x = solve(eqs, mu_gs)
#mu_names = sorted([key.name for key in x.keys()])
mu_vals = []
for key in sorted(x.keys(),key=lambda inp: inp.name):
mu_vals.append(x[key])
vector_func = [comp_ratio - c_ratio[0]]
vector_func.append(omega)
try:
mu_vals = nsolve(vector_func,mu,mu_vals)
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
except: # Go to the default option (search the space)
pass
else:
mu_vals = compute_mus_by_search()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
mu_vals = compute_mus_by_search()
# Compile mu's for all composition ratios in the range
#+/- 1% from the stoichiometry
result = {}
i = 0
len_y = len(yvals)
failed_y, failed_i = [], []
for y in yvals:
vector_func = [y-c_ratio[0]]
vector_func.append(omega)
try:
x = nsolve(vector_func,mu,mu_vals,module="numpy")
if x:
mu_vals = [float(mu_val) for mu_val in x]
except:
failed_y.append(y)
failed_i.append(i)
continue
result[y] = list(mu_vals)
x = None
i += 1
def get_next_mu_val(i):
        if i+1 >= len(yvals):
return None
y = yvals[i+1]
x = result.get(y,None)
if x:
mu_vals = [float(mu_val) for mu_val in x]
return mu_vals
else:
return get_next_mu_val(i+1)
def get_prev_mu_val(i):
if i <= 0:
return None
y = yvals[i-1]
x = result.get(y,None)
if x:
mu_vals = [float(mu_val) for mu_val in x]
return mu_vals
else:
            return get_prev_mu_val(i-1)
# Try to get better trial mus for failed cases
for j in range(len(failed_y)):
i = failed_i[j]
prev_mu_val = get_prev_mu_val(i)
if not prev_mu_val:
continue
next_mu_val = get_next_mu_val(i)
if not next_mu_val:
continue
y = failed_y[j]
vector_func = [y-c_ratio[0]]
vector_func.append(omega)
trial_mu = list(map(lambda x: float(sum(x))/len(x), \
zip(prev_mu_val,next_mu_val)))
try:
x = nsolve(vector_func,mu,trial_mu,module="numpy")
if x:
mu_vals = [float(mu_val) for mu_val in x]
except:
continue
result[y] = mu_vals
x = None
# Alternate way of calculating trial mus for failed cases
# by taking average of trial mus at extremes.
#for j in range(len(failed_y)):
# y = yvals[0]
# prev_mu_val = result[y]
# y = yvals[-1]
# next_mu_val = result[y]
#
# trial_mu = list(map(lambda x: float(sum(x))/len(x), \
# zip(prev_mu_val,next_mu_val)))
# y = failed_y[j]
# vector_func = [y-c_ratio[0]]
# vector_func.append(omega)
# try:
# x = nsolve(vector_func,mu,trial_mu,module="numpy")
# if x:
# mu_vals = [float(mu_val) for mu_val in x]
# except:
# continue
# result[y] = list(mu_vals)
if len(result.keys()) < len(yvals)/2:
raise ValueError('Not sufficient data')
res = []
new_mu_dict = {}
# Compute the concentrations for all the compositions
for key in sorted(result.keys()):
mu_val = result[key]
total_c_val = [total_c[i].subs(dict(zip(mu,mu_val))) \
for i in range(len(total_c))]
c_val = c.subs(dict(zip(mu,mu_val)))
res1 = []
# Concentration of first element/over total concen
res1.append(float(total_c_val[0]/sum(total_c_val)))
new_mu_dict[res1[0]] = mu_val
sum_c0 = sum([c0[i,i] for i in range(n)])
for i in range(n):
for j in range(n):
if i == j: # Vacancy
vac_conc = float(exp(-(mu_val[site_mu_map[i]]+dE[i,i])/(k_B*T)))
res1.append(vac_conc)
else: # Antisite
res1.append(float(c_val[i,j]/c0[j,j]))
res.append(res1)
res = np.array(res)
dtype = [(str('x'),np.float64)]+[(str('y%d%d' % (i, j)), np.float64) \
for i in range(n) for j in range(n)]
res1 = np.sort(res.view(dtype), order=[str('x')],axis=0)
conc_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
conc_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
conc_data['x_label'] = els[0]+ " mole fraction"
conc_data['y_label'] = "Point defect concentration"
conc = []
for i in range(n):
conc.append([])
for j in range(n):
conc[i].append([])
for i in range(n):
for j in range(n):
y1 = [dat[0][i*n+j+1] for dat in res1]
conc[i][j] = y1
y_data = []
for i in range(n):
data = conc[i][i]
specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+specie+'}$'
else:
label = vac_string+specie+'_'+str(cur_ind)+'}$'
# Plot data and legend info
y_data.append({'data':data,'name':label})
for i in range(n):
site_specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
for j in range(m): # Antisite plot dat
sub_specie = specie_order[j]
if sub_specie == site_specie:
continue
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
inds = specie_site_index_map[j]
# TODO: Investigate the value below
data = np.sum([conc[ind][i] for ind in range(*inds)],axis=0)
data = data.tolist()
y_data.append({'data':data,'name':label})
conc_data['y'] = y_data
# Compute the formation energies
def compute_vac_formation_energies(mu_vals):
en = []
for vac_def in vac_defs:
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
uncor_energy = vac_def['energy']
formation_energy = uncor_energy + mu_vals[ind]
en.append(float(formation_energy))
return en
en_res = []
for key in sorted(new_mu_dict.keys()):
mu_val = new_mu_dict[key]
en_res.append(compute_vac_formation_energies(mu_val))
en_data = {'x_label':els[0]+' mole fraction', 'x':[]}
en_data['x'] = [dat[0][0] for dat in res1] # x-axis data
i = 0
y_data = []
for vac_def in vac_defs:
data = [data[i] for data in en_res]
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+site_specie+'}$'
else:
label = vac_string+site_specie+'_'+str(cur_ind)+'}$'
y_data.append({'data':data,'name':label})
i += 1
def compute_as_formation_energies(mu_vals):
en = []
for as_def in antisite_defs:
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
uncor_energy = as_def['energy']
form_en = uncor_energy + mu_vals[ind1] - mu_vals[ind2]
en.append(form_en)
return en
en_res = []
for key in sorted(new_mu_dict.keys()):
mu_val = new_mu_dict[key]
en_res.append(compute_as_formation_energies(mu_val))
i = 0
for as_def in antisite_defs:
data = [data[i] for data in en_res]
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
y_data.append({'data':data,'name':label})
i += 1
en_data['y'] = y_data
# Return chem potential as well
mu_data = {'x_label':els[0]+' mole fraction', 'x':[]}
mu_data['x'] = [dat[0][0] for dat in res1] # x-axis data
y_data = []
for j in range(m):
specie = specie_order[j]
mus = [new_mu_dict[key][j] for key in sorted(new_mu_dict.keys())]
y_data.append({'data':mus, 'name':specie})
mu_data['y'] = y_data
return conc_data, en_data, mu_data
@requires(sympy_found,
          "compute_defect_density requires Sympy module. Please install it.")
def compute_defect_density(structure, e0, vac_defs, antisite_defs, T=800,
trial_chem_pot=None, plot_style="highcharts"):
"""
Wrapper for the dilute_solution_model.
The computed plot data is prepared based on plot_style.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
T: Temperature in Kelvin
trial_chem_pot (optional): Trial chemical potentials to speedup
the plot generation. Format is {el1:mu1,...}
plot_style (string): Allowed options are
1) highcharts (default)
2) gnuplot
Returns:
The plot data is generated and returned in asked format.
"""
conc_data, en_data, mu_data = dilute_solution_model(
structure,e0,vac_defs,antisite_defs,T,
trial_chem_pot=trial_chem_pot)
    if plot_style == 'highcharts':
        # Energy data is ignored in this mode
hgh_chrt_data = {}
hgh_chrt_data['xAxis'] = conc_data['x_label']
hgh_chrt_data['yAxis'] = conc_data['y_label']
series = []
x = conc_data['x']
for y_data in conc_data['y']:
y = y_data['data']
xy = zip(x,y)
xy = [list(el) for el in xy]
name = y_data['name'].strip('$')
flds= name.split('_')
def_string = flds[0]
site_string = flds[1].strip('{}')
name = def_string+"<sub>"+site_string+"</sub>"
#series.append({'data':xy, 'name':y_data['name']})
series.append({'data':xy, 'name':name})
hgh_chrt_data['series'] = series
return hgh_chrt_data
elif plot_style == 'gnuplot':
def data_to_rows(inp_data):
rows = []
labels = []
labels.append(inp_data['x_label'])
labels += [y['name'] for y in inp_data['y']]
#labels.sort()
rows.append('#'+'\t'.join(labels))
m = len(inp_data['x'])
for i in range(m):
data = []
data.append(inp_data['x'][i])
data += [y['data'][i] for y in inp_data['y']]
data = [float(x) for x in data]
rows.append('\t'.join(list(map(str,data))))
return rows
conc_rows = data_to_rows(conc_data)
en_rows = data_to_rows(en_data)
mu_rows = data_to_rows(mu_data)
return conc_rows, en_rows, mu_rows
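# Usage sketch (hypothetical inputs, not part of the original module):
#
#     vacs = [{'site_index': 1, 'site_specie': 'Al',
#              'site_multiplicity': 3, 'energy': 0.7}, ...]
#     antisites = [{'site_index': 1, 'site_specie': 'Al',
#                   'site_multiplicity': 3, 'substitution_specie': 'Ni',
#                   'energy': 0.5}, ...]
#     conc_rows, en_rows, mu_rows = compute_defect_density(
#         struct, e0, vacs, antisites, T=1000, plot_style='gnuplot')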
# solute_site_preference_finder is based on dilute_solution_model, so most
# of the code is the same. Differences in the setup and processing, however,
# warrant a separate function.
@requires(sympy_found, "solute_site_preference_finder requires Sympy module. "\
"Please install it.")
def solute_site_preference_finder(
structure, e0, T, vac_defs, antisite_defs, solute_defs,
solute_concen=0.01, trial_chem_pot = None):
"""
Compute the solute defect densities using dilute solution model.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
T: Temperature in Kelvin
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite
defect are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
solute_defs: List of solute defect parameters in the dictionary
            format. Similar to that of antisite_defs, with the solute specie
specified in substitution_specie
solute_concen: Solute concentration (in fractional value)
trial_chem_pot: Trial chemical potentials to speedup the plot
generation. Format is {el1:mu1,...}
Returns:
plot_data: The data for plotting the solute defect concentration.
"""
if not check_input(vac_defs):
raise ValueError('Vacancy energy is not defined')
if not check_input(antisite_defs):
raise ValueError('Antisite energy is not defined')
formation_energies = {}
formation_energies['vacancies'] = copy.deepcopy(vac_defs)
formation_energies['antisites'] = copy.deepcopy(antisite_defs)
formation_energies['solute'] = copy.deepcopy(solute_defs)
for vac in formation_energies['vacancies']:
del vac['energy']
for asite in formation_energies['antisites']:
del asite['energy']
for solute in formation_energies['solute']:
del solute['energy']
# Setup the system
site_species = [vac_def['site_specie'] for vac_def in vac_defs]
solute_specie = solute_defs[0]['substitution_specie']
site_species.append(solute_specie)
multiplicity = [vac_def['site_multiplicity'] for vac_def in vac_defs]
m = len(set(site_species)) # distinct species
n = len(vac_defs) # inequivalent sites
# Reduce the system and associated parameters such that only distinctive
# atoms are retained
comm_div = gcd(*tuple(multiplicity))
multiplicity = [val/comm_div for val in multiplicity]
multiplicity.append(0)
e0 = e0/comm_div
T = Float(T)
#c0 = np.diag(multiplicity)
c0 = np.diag(np.ones(n+1))
c0[n,n] = 0
mu = [Symbol('mu'+str(i)) for i in range(m)]
# Generate maps for hashing
# Generate specie->mu map and use it for site->mu map
specie_order = [] # Contains hash for site->mu map Eg: [Al, Ni]
site_specie_set = set() # Eg: {Ni, Al}
for i in range(len(site_species)):
site_specie = site_species[i]
if site_specie not in site_specie_set:
site_specie_set.add(site_specie)
specie_order.append(site_specie)
site_mu_map = [] # Eg: [mu0,mu0,mu0,mu1] where mu0->Al, and mu1->Ni
for i in range(len(site_species)):
site_specie = site_species[i]
j = specie_order.index(site_specie)
site_mu_map.append(j)
specie_site_index_map = [] # Eg: [(0,3),(3,4)] for Al & Ni
for i in range(m):
low_ind = site_species.index(specie_order[i])
if i < m-1:
hgh_ind = site_species.index(specie_order[i+1])
else:
hgh_ind = len(site_species)
specie_site_index_map.append((low_ind,hgh_ind))
"""
dC: delta concentration matrix:
dC[i,j,k]: Concentration change of atom i, due to presence of atom
j on lattice site k
Special case is [i,i,i] which is considered as vacancy
Few cases: dC[i,i,i] = -1 due to being vacancy special case
dC[k,k,i] = +1 due to increment in k at i lattice if i
lattice type is of different element
dC[i,k,i] = -1 due to decrement of ith type atom due to
presence of kth type atom on ith sublattice and kth type
atom specie is different from ith sublattice atom specie
dC[i,k,k] = 0 due to no effect on ith type atom
dC[i,j,k] = 0 if i!=j!=k
"""
dC = np.zeros((n+1,n+1,n), dtype=np.int)
for i in range(n):
for j in range(n):
for k in range(n):
                if i == j and site_species[j] != site_species[k] and \
                    site_species[i] != site_species[k]:
dC[i,j,k] = 1
for j in range(n+1):
for k in range(n):
if i == k:
dC[i,j,k] = -1
for k in range(n):
dC[n,n,k] = 1
    for k in range(n):
        for j in range(n):
            for i in range(n):
                if i != j:
                    if site_species[i] == site_species[k]:
                        dC[i,j,k] = 0
for ind_map in specie_site_index_map:
if ind_map[1]-ind_map[0] > 1:
for index1 in range(ind_map[0]+1,ind_map[1]):
for index2 in range(ind_map[0]):
for i in range(n):
dC[i,index1,index2] = 0
for index2 in range(ind_map[1],n):
for i in range(n):
dC[i,index1,index2] = 0
# dE matrix: Flip energies (or raw defect energies)
els = [vac_def['site_specie'] for vac_def in vac_defs]
dE = []
for i in range(n+1):
dE.append([])
for i in range(n+1):
for j in range(n):
dE[i].append(0)
for j in range(n):
for i in range(n):
if i == j:
dE[i][j] = vac_defs[i]['energy']
else:
sub_specie = vac_defs[i]['site_specie']
site_specie = vac_defs[j]['site_specie']
if site_specie == sub_specie:
dE[i][j] = 0
else:
for as_def in antisite_defs:
if int(as_def['site_index']) == j+1 and \
sub_specie == as_def['substitution_specie']:
dE[i][j] = as_def['energy']
break
# Solute
site_specie = vac_defs[j]['site_specie']
for solute_def in solute_defs:
def_site_ind = int(solute_def['site_index'])
def_site_specie = solute_def['site_specie']
if def_site_specie == site_specie and def_site_ind == j+1:
dE[n][j] = solute_def['energy']
break
dE = np.array(dE)
#np.where(dE == np.array(None), 0, dE)
# Initialization for concentrations
# c(i,p) == presence of ith type atom on pth type site
c = Matrix(n+1,n,[0]*n*(n+1))
for i in range(n+1):
for p in range(n):
c[i,p] = Integer(c0[i,p])
site_flip_contribs = []
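# each distinct flip adds a grand-canonical Boltzmann factor
# exp(-(dE[epi,p] - sum_j mu_j*dC[j,epi,p])/(k_B*T)) to the occupancy;
# the membership test below keeps duplicate factors from being counted twice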
for epi in range(n+1):
sum_mu = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p]) for j in range(n+1)])
flip = dC[i,epi,p] * exp(-(dE[epi,p]-sum_mu)/(k_B*T))
if flip not in site_flip_contribs:
site_flip_contribs.append(flip)
c[i,p] += flip
host_c = Matrix(n,n,[0]*n*n)
for i in range(n):
for p in range(n):
host_c[i,p] = Integer(c0[i,p])
site_flip_contribs = []
for epi in range(n):
sum_mu = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p]) for j in range(n)])
flip = dC[i,epi,p] * exp(-(dE[epi,p]-sum_mu)/(k_B*T))
if flip not in site_flip_contribs:
site_flip_contribs.append(flip)
host_c[i,p] += flip
#specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
#total_c = [sum(c[ind[0]:ind[1]]) for ind in specie_site_index_map]
total_c = []
for ind in specie_site_index_map:
val = 0
for i in range(*ind):
sum_i = sum([c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
total_c.append(val)
c_ratio = [total_c[i]/sum(total_c) for i in range(m)]
host_total_c = []
for ind in specie_site_index_map[:-1]:
val = 0
for i in range(*ind):
sum_i = sum([host_c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
host_total_c.append(val)
host_c_ratio = [host_total_c[i]/sum(host_total_c) for i in range(m-1)]
# Expression for Omega, the Grand Potential
omega1 = e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:])*multiplicity[i] \
for i in range(n)])
omega = omega1
used_dEs = []
for p_r in range(n):
for epi in range(n):
sum_mu1 = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p_r]) for j in range(n)])
sum_mu = sum_mu1 - mu[site_mu_map[n]]* dC[n,epi,p_r]
if p_r != epi and site_mu_map[p_r] == site_mu_map[epi]:
continue
if dE[epi,p_r] not in used_dEs:
omega1 -= k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu1)/(k_B*T))
omega -= k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu)/(k_B*T))
used_dEs.append(dE[epi,p_r])
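# omega is the grand potential: e0 minus mu*N for the pristine occupancies,
# minus one k_B*T-weighted Boltzmann term per distinct defect flip
# (omega1 is the host-only version that excludes the solute chemical potential)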
# Compute composition ranges
max_host_specie_concen = 1-solute_concen
mult = multiplicity
specie_concen = [
sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
comp1_min = sum(multiplicity[li:hi])/sum(multiplicity)* \
max_host_specie_concen - 0.01
comp1_max = sum(multiplicity[li:hi])/sum(multiplicity)* \
max_host_specie_concen + 0.01
delta = (comp1_max - comp1_min)/50.0
#def reduce_mu():
# omega = [e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:]) for i in range(n)])]
# x = solve(omega)
# return x
def reduce_mu():
host_concen = 1-solute_concen
new_c0 = c0.astype(float)
for i in range(n):
new_c0[i,i] = host_concen*c0[i,i]
new_c0[n,n] = 2*solute_concen
omega = [
e0-sum([mu[site_mu_map[i]]*sum(new_c0[i,:])
for i in range(n+1)])]
x = solve(omega)
return x
def compute_solute_mu_by_lin_search(host_mu_vals):
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
max_host_specie_concen = 1-solute_concen
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
y_vect = host_specie_concen_ratio
vector_func = [y_vect[i]-c_ratio[i] for i in range(m)]
vector_func.append(omega)
min_diff = 1e10
mu_vals = None
c_val = None
m1_min = -20.0
if e0 > 0:
m1_max = 10 # Search space needs to be modified
else:
m1_max = 0
for m1 in np.arange(m1_min,m1_max,0.1):
trial_mus = host_mu_vals+[m1]
try:
x = nsolve(vector_func,mu,trial_mus,module="numpy")
if x:
mu_vals = [float(mu_val) for mu_val in x]
break
except:
continue
else:
raise ValueError("Couldn't find mus")
return mu_vals
def compute_mus():
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [
sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
max_host_specie_concen = 1-solute_concen
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
y_vect = host_specie_concen_ratio
vector_func = [y_vect[i]-c_ratio[i] for i in range(m)]
vector_func.append(omega)
mu_vals = None
c_val = None
m_min = -15.0
if e0 > 0:
m_max = 10 # Search space needs to be modified
else:
m_max = 0
for m1 in np.arange(m_min,m_max,0.3):
for m2 in np.arange(m_min,m_max,0.3):
m0 = mu_red[mu[0]].subs([(mu[1],m1),(mu[2],m2)])
try:
mu_vals = nsolve(vector_func,mu,[m0,m1,m2],module="numpy")
# Line needs to be modified to include all mus when n > 2
except:
continue
break
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
else:
raise ValueError("Couldn't find mus")
return mu_vals
if not trial_chem_pot:
# Try computing mus by assuming one of the defects is dominant at 0.01
# concentration. Vacancy is tried first and then antisite
# Generate trial mus assuming vacancy as dominant defect
#for specie-0 at lower yval
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
li1 = specie_site_index_map[1][0]
hi1 = specie_site_index_map[1][1]
spec_mult = [sum(multiplicity[li:hi]), sum(multiplicity[li1:hi1])]
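# ln(100) ~= 4.60517: picks mu so the dominant-defect concentration
# exp(-(mu + E_f)/(k_B*T)) comes out to 0.01, i.e. mu = k_B*T*ln(100) - E_f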
ln_def_conc = 4.60517
for i in range(li,hi):
vac_flip_en = vac_defs[i]['energy']
mu_vals = [ln_def_conc*k_B*T -vac_flip_en]
mu_vals.append((e0 - spec_mult[0]*mu_vals[0]) / spec_mult[1])
comp_ratio = comp1_min
# Test if the trial mus are good
vector_func = [comp_ratio - host_c_ratio[0]]
vector_func.append(omega1)
try:
host_mu_vals = nsolve(vector_func,mu[:-1],mu_vals)
if host_mu_vals:
host_mu_vals = [float(mu_val) for mu_val in host_mu_vals]
mu_vals = compute_solute_mu_by_lin_search(host_mu_vals)
break
except: # Go for antisite as dominant defect
mu_gs = [Symbol('mu_gs'+j.__str__()) for j in range(m-1)]
eqs = [mu_gs[0]-mu_gs[1] - (ln_def_conc*k_B*T-antisite_defs[i][
'energy'])]
eqs.append(spec_mult[0]*mu_gs[0] + spec_mult[1]*mu_gs[1] - e0)
x = solve(eqs, mu_gs)
host_mu_vals = []
for key in sorted(x.keys(),key=lambda inp: inp.name):
host_mu_vals.append(x[key])
vector_func = [comp_ratio - host_c_ratio[0]]
vector_func.append(omega1)
try:
host_mu_vals = nsolve(vector_func,mu[:-1],host_mu_vals)
if host_mu_vals:
host_mu_vals = [float(mu_val) for mu_val in host_mu_vals]
mu_vals = compute_solute_mu_by_lin_search(host_mu_vals)
break
except: # Go to the default option (search the space)
pass
else:
mu_vals = compute_mus()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
mu_vals = compute_mus()
# Compile mu's for all composition ratios in the range
#+/- 1% from the stoichiometry
result = {}
for y in np.arange(comp1_min,comp1_max+delta,delta):
y_vect = []
y_vect.append(y)
y2 = max_host_specie_concen - y
y_vect.append(y2)
y_vect.append(solute_concen)
vector_func = [y_vect[i]-c_ratio[i] for i in range(1,m)]
vector_func.append(omega)
try:
x = nsolve(vector_func,mu,mu_vals)
if x:
mu_vals = [float(mu_val) for mu_val in x]
except:
continue
result[y] = mu_vals
res = []
# Compute the concentrations for all the compositions
for key in sorted(result.keys()):
mu_val = result[key]
total_c_val = [total_c[i].subs(dict(zip(mu,mu_val))) \
for i in range(len(total_c))]
c_val = c.subs(dict(zip(mu,mu_val)))
# Concentration of first element/over total concen
res1 = []
res1.append(float(total_c_val[0]/sum(total_c_val)))
sum_c0 = sum([c0[i,i] for i in range(n)])
for i in range(n+1):
for j in range(n):
if i == j: # Vacancy
vac_conc = float(exp(-(mu_val[site_mu_map[i]]+dE[i,i])/(k_B*T)))
res1.append(vac_conc)
else: # Antisite
res1.append(float(c_val[i,j]/c0[j,j]))
res.append(res1)
res = np.array(res)
dtype = [(str('x'),np.float64)]+[(str('y%d%d' % (i, j)), np.float64) \
for i in range(n+1) for j in range(n)]
res1 = np.sort(res.view(dtype),order=[str('x')],axis=0)
conc = []
for i in range(n+1):
conc.append([])
for j in range(n):
conc[i].append([])
for i in range(n+1): # Append vacancies
for j in range(n):
y1 = [dat[0][i*n+j+1] for dat in res1]
conc[i][j] = y1
# Compute solute site preference
# Removing the functionality
#site_pref_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
#site_pref_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
#site_pref_data['x_label'] = els[0]+ "_mole_fraction"
#site_pref_data['y_label'] = "$"+solute_specie+"_{"+els[0]+"}/("+\
# solute_specie+"_{"+els[0]+"}+"+solute_specie+"_{"+els[1]+"})$"
#y_data = []
#inds = specie_site_index_map[m-1]
#data1 = np.sum([multiplicity[0]*conc[ind][0] for ind in range(*inds)],axis=0)
#data2 = np.sum([multiplicity[1]*conc[ind][1] for ind in range(*inds)],axis=0)
#frac_data = data1/(data1+data2)
#frac_data = frac_data.tolist()
#y_data.append({'data':frac_data})
#site_pref_data['y'] = y_data
# Return all defect concentrations
conc_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
conc_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
conc_data['x_label'] = els[0]+ " mole fraction"
conc_data['y_label'] = "Point defect concentration"
y_data = []
# Vacancy
for i in range(n):
data = conc[i][i]
specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+specie+'}$'
else:
label = vac_string+specie+'_'+str(cur_ind)+'}$'
# Plot data and legend info
y_data.append({'data':data,'name':label})
# Antisites and solute
for i in range(n):
site_specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
for j in range(m):
sub_specie = specie_order[j]
if sub_specie == site_specie:
continue
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
inds = specie_site_index_map[j]
# TODO: Investigate the value below
data = np.sum([conc[ind][i] for ind in range(*inds)],axis=0)
data = data.tolist()
y_data.append({'data':data,'name':label})
conc_data['y'] = y_data
#return site_pref_data, conc_data
return conc_data
@requires(sympy_found,
"solute_defect_density requires Sympy module. Please install it.")
def solute_defect_density(structure, e0, vac_defs, antisite_defs, solute_defs,
solute_concen=0.01, T=800, trial_chem_pot = None,
plot_style="highchargs"):
"""
Wrapper for the solute_site_preference_finder.
The computed plot data is prepared based on plot_style.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
solute_defs: List of solute defect parameters in the dictionary
format. Similar to that of antisite defs, with the solute specie
specified in substitution_specie
solute_concen: Solute concentration (in fractional value)
T: Temperature in Kelvin
trial_chem_pot (optional): Trial chemical potentials to speed up
the plot generation. Format is {el1:mu1,...}
plot_style (string): Allowed options are
1) highcharts (default)
2) gnuplot
Returns:
The plot data is generated and returned in asked format.
"""
#solute_site_pref_data, def_conc_data = solute_site_preference_finder(
def_conc_data = solute_site_preference_finder(
structure, e0, T, vac_defs, antisite_defs, solute_defs,
solute_concen=solute_concen, trial_chem_pot=trial_chem_pot)
if plot_style == 'highcharts':
"Energy data is ignored in this mode"
hgh_chrt_data = {}
hgh_chrt_data['xAxis'] = def_conc_data['x_label']
hgh_chrt_data['yAxis'] = def_conc_data['y_label']
series = []
x = def_conc_data['x']
for y_data in def_conc_data['y']:
y = y_data['data']
xy = zip(x,y)
xy = [list(el) for el in xy]
name = y_data['name'].strip('$')
flds= name.split('_')
def_string = flds[0]
site_string = flds[1].strip('{}')
name = def_string+"<sub>"+site_string+"</sub>"
#series.append({'data':xy, 'name':y_data['name']})
series.append({'data':xy, 'name':name})
hgh_chrt_data['series'] = series
return hgh_chrt_data
elif plot_style == 'gnuplot':
def data_to_rows(inp_data, y_lbl_flg):
rows = []
labels = []
labels.append(inp_data['x_label'])
if y_lbl_flg:
labels.append(inp_data['y_label'])
else:
labels += [y['name'] for y in inp_data['y']]
rows.append('#'+'\t'.join(labels))
m = len(inp_data['x'])
for i in range(m):
data = []
data.append(inp_data['x'][i])
data += [y['data'][i] for y in inp_data['y']]
data = [float(x) for x in data]
rows.append('\t'.join(list(map(str,data))))
return rows
#solute_site_pref_rows = data_to_rows(solute_site_pref_data, True)
pt_def_conc_rows = data_to_rows(def_conc_data, False)
#return solute_site_pref_rows, pt_def_conc_rows
return pt_def_conc_rows
|
Dioptas/pymatgen
|
pymatgen/analysis/defects/dilute_solution_model.py
|
Python
|
mit
| 53,718
|
[
"CRYSTAL",
"VASP",
"pymatgen"
] |
06694cd0a8f4977e3ba5ac60484627607e6ae59c4cdf179b1523ec6e4909b013
|
#!/usr/bin/env python
from app import app
from flask_script import Manager, Shell
# The Flask-Script extension provides support for writing external scripts in
# Flask, which includes running a development server. For more info, visit:
# http://flask-script.readthedocs.org/en/latest/.
manager = Manager(app)
def make_shell_context():
return dict(app=app)
manager.add_command('shell', Shell(make_context=make_shell_context))
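# Typical invocations (Flask-Script also registers a default runserver command):
#   python manage.py runserver
#   python manage.py shell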
if __name__ == '__main__':
manager.run()
|
stormpython/brewmaster
|
manage.py
|
Python
|
mit
| 483
|
[
"VisIt"
] |
0cb94212623e5fd9167922650c80b131cb26fd3234325749c56132621301b778
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
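# PHIinv(.975) ~= 1.96: half-width in sigmas of a 95% confidence interval;
# the 0.025/0.975 contour levels plotted below bracket that same interval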
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label(r'${\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py
|
Python
|
bsd-3-clause
| 3,490
|
[
"Gaussian"
] |
71ea27f3ff557b3f09b56a79aa22a194caa2b4a09d4c41d64b9b78355fc17dc7
|
import json
import unicodedata # to detect Unicode category
from zdict.dictionary import DictBase
from zdict.exceptions import QueryError, NotFoundError
from zdict.models import Record
class MoeDict(DictBase):
API = 'https://www.moedict.tw/uni/{word}'
@property
def provider(self):
return 'moe'
@property
def title(self):
return '萌典'
def _get_url(self, word) -> str:
return self.API.format(word=word)
def show(self, record: Record):
content = json.loads(record.content)
# print word
self.color.print(content.get('title', ''), 'yellow')
for word in content.get('heteronyms', ''):
# print pronounce
for key, display in (
('bopomofo', '注音'),
('bopomofo2', '注音二式'),
('pinyin', '漢語拼音')
):
self.color.print(display, end='')
self.color.print(
'[' + word.get(key, '') + ']',
'lwhite',
end=' ',
)
print()
print()
# print explain
for count, explain in enumerate(word.get('definitions', '')):
self.color.print(
'{order}. {text}'.format(
order=count + 1,
text=explain.get('def', '')
),
)
if explain.get('synonyms'):
self.color.print(
'同義詞: {text}'.format(text=explain['synonyms']),
'magenta',
indent=2,
)
if explain.get('antonyms'):
self.color.print(
'反義詞: {text}'.format(text=explain['antonyms']),
'magenta',
indent=2,
)
for example in explain.get('example', ''):
self.color.print(
example,
'indigo',
indent=2,
)
for quote in explain.get('quote', ''):
self.color.print(
'[引用] {text}'.format(text=quote),
'green',
indent=2,
)
print()
def query(self, word: str):
try:
content = self._get_raw(word)
except QueryError as exception:
raise NotFoundError(exception.word)
record = Record(
word=word,
content=content,
source=self.provider,
)
return record
def is_other_format(char):
return unicodedata.category(char) != 'Cf'
def remove_cf(data):
return ''.join(filter(is_other_format, data))
def clean(data, clean_cf=False):
'''
Clean the word segmentation:
remove "`~" characters and, optionally, anything in the Unicode 'Cf' category
'''
data = data.translate(str.maketrans('', '', '`~'))
if clean_cf:
return remove_cf(data)
else:
return data
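# Illustrative (hypothetical input): clean('`~foo') == 'foo'; with
# clean_cf=True, zero-width marks (Unicode category 'Cf') are stripped as well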
class MoeDictTaiwanese(DictBase):
API = 'https://www.moedict.tw/t/{word}.json'
@property
def provider(self):
return 'moe-taiwanese'
@property
def title(self):
return '萌典(臺)'
def _get_url(self, word) -> str:
return self.API.format(word=word)
def show(self, record: Record):
content = json.loads(record.content)
# print word
self.color.print(clean(content.get('t', '')), 'yellow')
for word in content.get('h', ''):
# print pronounce
for key, display in (
# TODO: where is bopomofo ?
('T', '臺羅拼音'), # Tailo
):
self.color.print(display, end='')
self.color.print(
'[' + word.get(key, '') + ']',
'lwhite',
end=' ',
)
print()
print()
# print explain
for count, explain in enumerate(word.get('d', '')):
self.color.print('{order}. '.format(order=count + 1), end='')
type = clean(explain.get('type', ''))
if type:
self.color.print(
'[' + type + ']',
'lgreen',
end=' ',
)
self.color.print(clean(explain.get('f', '')), end='')
for example in explain.get('e', ''):
self.color.print(
clean(example, True),
'indigo',
indent=2,
)
print()
print()
def query(self, word: str):
try:
content = self._get_raw(word)
except QueryError as exception:
raise NotFoundError(exception.word)
record = Record(
word=word,
content=content,
source=self.provider,
)
return record
|
M157q/zdict
|
zdict/dictionaries/moe.py
|
Python
|
gpl-3.0
| 5,222
|
[
"MOE"
] |
ecd8ee60cf9ec99c457e2a7c4d4cf7e6595a4c1e990030916a6eb5880b28fef4
|
#!/usr/bin/env python
#Copyright (C) 2014 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
import os
import argparse
import logging
import random
import numpy as np
from teHmm.common import myLog, EPSILON, initBedTool, cleanBedTool
from teHmm.common import addLoggingOptions, setLoggingFromOptions, logger
from teHmm.common import getLocalTempPath
from teHmm.track import TrackList, Track, CategoryMap
from teHmm.trackIO import readBedIntervals, getMergedBedIntervals
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Create starting transition and emission distributions "
"from a candidate BED annotation, which can"
" be used with teHmmTrain.py using the --initTransProbs and "
"--initEmProbs options, respectively. The distributions created here"
" are extremely simple, but this can be a good shortcut to at least "
"getting the state names into the init files, which can be further "
"tweeked by hand.")
parser.add_argument("tracksInfo", help="Path of Tracks Info file "
"containing paths to genome annotation tracks")
parser.add_argument("trackName", help="Name of Track to use as initial"
" annotation")
parser.add_argument("queryBed", help="Bed file with regions to query")
parser.add_argument("outTransProbs", help="File to write transition model"
" to")
parser.add_argument("outEmProbs", help="File to write emission model to")
parser.add_argument("--numOut", help="Number of \"outside\" states to add"
" to the model.", default=1, type=int)
parser.add_argument("--numTot", help="Add x \"outside\" states such "
"that total states is this. (overrieds --numOut)",
default=0, type=int)
parser.add_argument("--outName", help="Name of outside states (will have"
" numeric suffix if more than 1)", default="Outside")
parser.add_argument("--mode", help="Strategy for initializing the "
"transition graph: {\'star\': all states are connected"
" to the oustide state(s) but not each other; "
" \'data\': transitions estimated from input bed; "
" \'full\': dont write edges and let teHmmTrain.py "
"initialize as a clique}", default="star")
parser.add_argument("--selfTran", help="This script will always write all"
" the self-transition probabilities to the output file. "
"They will all be set to the specified value using this"
" option, or estimated from the data if -1", default=-1.,
type=float)
parser.add_argument("--em", help="Emission probability for input track ("
"ie probability that state emits itself)",
type=float, default=0.95)
parser.add_argument("--outEmNone", help="Add None emission probabilities"
" for target track for Outside states",
action="store_true", default=None)
addLoggingOptions(parser)
args = parser.parse_args()
if args.mode == "star" and args.numOut < 1:
raise RuntimeError("--numOut must be at least 1 if --mode star is used")
if args.mode != "star" and args.mode != "data" and args.mode != "full":
raise RuntimeError("--mode must be one of {star, data, full}")
if args.mode == "data":
raise RuntimeError("--data not implemented yet")
assert os.path.isfile(args.tracksInfo)
setLoggingFromOptions(args)
tempBedToolPath = initBedTool()
# Read the tracks info
trackList = TrackList(args.tracksInfo)
# Extract the track we want
track = trackList.getTrackByName(args.trackName)
if track is None:
raise RuntimeError("Track %s not found in tracksInfo" % args.trackName)
trackPath = track.getPath()
if track.getDist() != "multinomial" and track.getDist() != "gaussian":
raise RuntimeError("Track %s does not have multinomial or "
"gaussian distribution" % args.trackName)
if track.getScale() is not None or track.getLogScale() is not None:
raise RuntimeError("Track %s must not have scale" % args.trackName)
# read query intervals from the bed file
logger.info("loading query intervals from %s" % args.queryBed)
mergedIntervals = getMergedBedIntervals(args.queryBed, ncol=4)
if mergedIntervals is None or len(mergedIntervals) < 1:
raise RuntimeError("Could not read any intervals from %s" %
args.queryBed)
# read the track, while intersecting with query intervals
# (track is saved as temp XML file for sake not changing interface)
bedIntervals = []
for queryInterval in mergedIntervals:
bedIntervals += readBedIntervals(trackPath,
ncol = track.getValCol() + 1,
chrom=queryInterval[0],
start=queryInterval[1],
end=queryInterval[2])
# 1st pass to collect set of names
nameMap = CategoryMap(reserved = 0)
for interval in bedIntervals:
nameMap.update(interval[track.getValCol()])
outNameMap = CategoryMap(reserved = 0)
if args.numTot > 0:
args.numOut = max(0, args.numTot - len(nameMap))
for i in xrange(args.numOut):
outName = args.outName
if args.numOut > 1:
outName += str(i)
assert nameMap.has(outName) is False
outNameMap.update(outName)
# write the transition model for use with teHmmTrain.py --initTransProbs
writeTransitions(bedIntervals, nameMap, outNameMap, args)
# write the emission model for use with teHmmTrain.py --initEmProbs
writeEmissions(bedIntervals, nameMap, outNameMap, args)
cleanBedTool(tempBedToolPath)
def writeTransitions(bedIntervals, nameMap, outNameMap, args):
tfile = open(args.outTransProbs, "w")
# do the self transitions
N = len(nameMap)
selfTran = args.selfTran + np.zeros((N))
if args.selfTran < 0:
tot = np.zeros((N))
num = np.zeros((N))
for interval in bedIntervals:
assert nameMap.has(interval[3])
state = nameMap.getMap(interval[3])
assert state < N
num[state] += 1
tot[state] += interval[2] - interval[1] - 1
selfTran = tot / (tot + num)
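# i.e. selfTran[state] = (L-1)/L for mean annotated interval length L, so
# longer intervals in the candidate BED imply stickier (more self-looping) states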
for state, i in nameMap.catMap.items():
tfile.write("%s\t%s\t%f\n" % (state, state, selfTran[i]))
if args.mode == "star":
outTrans = (1. - selfTran[i]) / float(args.numOut)
for outState, j in outNameMap.catMap.items():
tfile.write("%s\t%s\t%f\n" % (state, outState, outTrans))
# do the outside states
if args.numOut > 0:
outselfTran = args.selfTran + np.zeros((args.numOut))
if args.selfTran < 0:
# hack for now (should be from data above)
logger.debug("Hacky maximum used for outside state self transition")
outselfTran = max(selfTran) + np.zeros((args.numOut))
for state, i in outNameMap.catMap.items():
tfile.write("%s\t%s\t%f\n" % (state, state, outselfTran[i]))
tfile.close()
def writeEmissions(bedIntervals, nameMap, outNameMap, args):
efile = open(args.outEmProbs, "w")
for state, i in nameMap.catMap.items():
efile.write("%s\t%s\t%s\t%f\n" % (state, args.trackName, state,
args.em))
if args.outEmNone is True:
for state, i in outNameMap.catMap.items():
efile.write("%s\t%s\t%s\t%f\n" % (state, args.trackName, "__NoNE__",
args.em))
efile.close()
if __name__ == "__main__":
sys.exit(main())
|
glennhickey/teHmm
|
bin/createStartingModel.py
|
Python
|
mit
| 8,253
|
[
"Gaussian"
] |
598d30c7e0d68f8887bc246721f52039ef51b511ac11da7ba18fc6a77399100b
|
"""
Contains classes to deal with generic sequence alignment stuff not
specific to a particular program or format.
classes:
o Alignment
"""
# standard library
import string
# biopython
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import Alphabet
from Bio.Alphabet import IUPAC
class Alignment:
"""Represent a set of alignments.
This is a base class to represent alignments, which can be subclassed
to deal with an alignment in a specific format.
"""
def __init__(self, alphabet):
"""Initialize a new Alignment object.
Arguments:
o alphabet - The alphabet to use for the sequence objects that are
created. This alphabet must be a gapped type.
"""
self._alphabet = alphabet
# hold everything at a list of seq record objects
self._records = []
def get_all_seqs(self):
"""Return all of the sequences involved in the alignment.
The return value is a list of SeqRecord objects.
"""
return self._records
def __iter__(self) :
"""Iterate over alignment rows as SeqRecord objects
e.g.
for record in align :
print record.id
print record.seq
"""
return iter(self._records)
def get_seq_by_num(self, number):
"""Retrieve a sequence by the number of the sequence in the consensus.
Returns:
o A Seq object for the requested sequence.
Raises:
o IndexError - If the specified number is out of range.
"""
return self._records[number].seq
def get_alignment_length(self):
"""Return the maximum length of the alignment.
All objects in the alignment should (hopefully) have the same
length. This function will go through and find this length
by finding the maximum length of sequences in the alignment.
"""
max_length = 0
for record in self._records:
if len(record.seq) > max_length:
max_length = len(record.seq)
return max_length
def add_sequence(self, descriptor, sequence, start = None, end = None,
weight = 1.0):
"""Add a sequence to the alignment.
This doesn't do any kind of alignment, it just adds in the sequence
object, which is assumed to be prealigned with the existing
sequences.
Arguments:
o descriptor - The descriptive id of the sequence being added.
This will be used as the resulting SeqRecord's
.id property (and, for historical compatibility,
also the .description property)
o sequence - A string with sequence info.
o start - You can explicitly set the start point of the sequence.
This is useful (at least) for BLAST alignments, which can just
be partial alignments of sequences.
o end - Specify the end of the sequence, which is important
for the same reason as the start.
o weight - The weight to place on the sequence in the alignment.
By default, all sequences have the same weight. (0.0 => no weight,
1.0 => highest weight)
"""
new_seq = Seq(sequence, self._alphabet)
#We are now effectively using the SeqRecord's .id as
#the primary identifier (e.g. in Bio.SeqIO) so we should
#populate it with the descriptor.
#For backwards compatibility, also store this in the
#SeqRecord's description property.
new_record = SeqRecord(new_seq,
id = descriptor,
description = descriptor)
# hack! We really need to work out how to deal with annotations
# and features in biopython. Right now, I'll just use the
# generic annotations dictionary we've got to store the start
# and end, but we should think up something better. I don't know
# if I'm really a big fan of the LocatableSeq thing they've got
# in BioPerl, but I'm not positive what the best thing to do on
# this is...
if start:
new_record.annotations['start'] = start
if end:
new_record.annotations['end'] = end
# another hack to add weight information to the sequence
new_record.annotations['weight'] = weight
self._records.append(new_record)
def get_column(self,col):
"""Returns a string containing a given column"""
col_str = ''
assert col >= 0 and col < self.get_alignment_length()
for rec in self._records:
col_str += rec.seq[col]
return col_str
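# e.g. with the three demo sequences in the self test below, get_column(0)
# returns 'ACA'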
if __name__ == "__main__" :
print "Mini self test..."
raw_data = ["ACGATCAGCTAGCT", "CCGATCAGCTAGCT", "ACGATGAGCTAGCT"]
a = Alignment(Alphabet.generic_dna)
a.add_sequence("Alpha", raw_data[0], weight=2)
a.add_sequence("Beta", raw_data[1])
a.add_sequence("Gamma", raw_data[2])
#Iterating over the rows...
for rec in a :
assert isinstance(rec, SeqRecord)
for r,rec in enumerate(a) :
assert isinstance(rec, SeqRecord)
assert raw_data[r] == rec.seq.tostring()
if r==0 : assert rec.annotations['weight']==2
print "Alignment iteraction as SeqRecord OK"
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/Align/Generic.py
|
Python
|
apache-2.0
| 5,360
|
[
"BLAST",
"BioPerl",
"Biopython"
] |
fd966396d0bea9fb92eeda4a63a7a1c11655af18507308f49e9dd1e990e1e5ff
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
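# Illustrative: SnakeCaseStyle.CONST_NAME_RGX matches "my_const" and "__all__"
# but not "MY_CONST", since this style expects lowercase constant names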
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
Returns True if the loop may end in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end in a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
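# Illustrative: a break bound to a nested loop does not count for the outer one,
#   for x in xs:      # _loop_exits_early(<this for>) -> False
#       for y in ys:
#           break     # exits only the inner loop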
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
        # creation. If the class is instantiated in its own
        # body, we assume it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
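# Illustrative sketch (hypothetical code, not part of pylint) of what
# _check_redefinition above flags, assuming a module with two plain bindings:
#
#     def compute():        # first binding of the name
#         return 1
#
#     def compute():        # [function-redefined] second binding
#         return 2
#
# Conditional redefinitions such as ``if compute is None: ...`` and
# typing.overload stubs are exempted by the branches above.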
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
            'e.g. doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
            # it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statement in a separated message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
defaults = node.args.defaults or [] + node.args.kw_defaults or []
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Is it right sibling ?
self._check_unreachable(node)
# 2 - Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
            # we are doubtful about the inferred type of the node, so just
            # check whether format was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
        If, before reaching a try...finally block, we find a parent whose
        type is in breaker_classes, we skip the whole check."""
        # if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand...-children of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
""" check that the argument to `reversed` is a sequence """
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they provide explicitly a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
                    # Don't emit a message if the second is a function call;
                    # there's no way that can be mistaken for a name assignment.
# If the line number doesn't match
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
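# Worked example of the generated option names: the "class_attribute" entry
# yields ``class-attribute-naming-style`` (default "any") and
# ``class-attribute-rgx``, since underscores are replaced by dashes above.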
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
            # variables introduced via the global statement aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
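    # Illustration: on a Python 3.6 interpreter, ``async = 1`` is reported as
    # [assign-to-new-keyword] ("Name async will become a keyword in Python
    # 3.7") per KEYWORD_ONSET; on 3.7+ the helper returns None, since there
    # the assignment would already be a syntax error.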
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
            min_lines = self.config.docstring_min_length
            if node_type != "module" and min_lines > -1 and lines < min_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
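    # Worked example (illustrative): for ``if flag == True:`` the suggestion
    # assembled above reads "'flag is True' if checking for the singleton
    # value True, or 'flag' if testing for truthiness".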
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
                # Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
        # this message should be emitted only when a bare callable is
        # compared with a non-bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
ruchee/vimrc
|
vimfiles/bundle/vim-python/submodules/pylint/pylint/checkers/base.py
|
Python
|
mit
| 100,473
|
[
"VisIt"
] |
1f00de402d4bf732cca9c9fc9ee3b457b3673f76f28fda595080dc5a6ecda49d
|
import argparse,logging,os
import mxnet as mx
from symbol_resnet import resnet
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def multi_factor_scheduler(begin_epoch, epoch_size, step=[60, 75, 90], factor=0.1):
step_ = [epoch_size * (x-begin_epoch) for x in step if x-begin_epoch > 0]
return mx.lr_scheduler.MultiFactorScheduler(step=step_, factor=factor) if len(step_) else None
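# Worked example (illustrative numbers): with begin_epoch=0, epoch_size=5004
# and the default step=[60, 75, 90], the learning rate is multiplied by 0.1
# at batches [300240, 375300, 450360]; steps with x <= begin_epoch are dropped.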
def main():
if args.data_type == "cifar10":
args.aug_level = 1
args.num_classes = 10
        # depth should be one of 110, 164, 1001, ..., i.e. it should satisfy (args.depth-2) % 9 == 0
if((args.depth-2)%9 == 0 and args.depth >= 164):
            per_unit = [(args.depth-2)//9]  # integer division keeps the unit count an int on Python 3
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif((args.depth-2)%6 == 0 and args.depth < 164):
            per_unit = [(args.depth-2)//6]  # integer division keeps the unit count an int on Python 3
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on detph {}, you can do it youself".format(args.depth))
units = per_unit*3
symbol = resnet(units=units, num_stage=3, filter_list=filter_list, num_class=args.num_classes,
data_type="cifar10", bottle_neck = bottle_neck, bn_mom=args.bn_mom, workspace=args.workspace,
memonger=args.memonger)
elif args.data_type == "imagenet":
args.num_classes = 1000
if args.depth == 18:
units = [2, 2, 2, 2]
elif args.depth == 34:
units = [3, 4, 6, 3]
elif args.depth == 50:
units = [3, 4, 6, 3]
elif args.depth == 101:
units = [3, 4, 23, 3]
elif args.depth == 152:
units = [3, 8, 36, 3]
elif args.depth == 200:
units = [3, 24, 36, 3]
elif args.depth == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on detph {}, you can do it youself".format(args.depth))
symbol = resnet(units=units, num_stage=4, filter_list=[64, 256, 512, 1024, 2048] if args.depth >=50
else [64, 64, 128, 256, 512], num_class=args.num_classes, data_type="imagenet", bottle_neck = True
if args.depth >= 50 else False, bn_mom=args.bn_mom, workspace=args.workspace,
memonger=args.memonger)
else:
raise ValueError("do not support {} yet".format(args.data_type))
kv = mx.kvstore.create(args.kv_store)
devs = mx.cpu() if args.gpus is None else [mx.gpu(int(i)) for i in args.gpus.split(',')]
epoch_size = max(int(args.num_examples / args.batch_size / kv.num_workers), 1)
begin_epoch = args.model_load_epoch if args.model_load_epoch else 0
if not os.path.exists("./model"):
os.mkdir("./model")
model_prefix = "model/resnet-{}-{}-{}".format(args.data_type, args.depth, kv.rank)
checkpoint = mx.callback.do_checkpoint(model_prefix)
arg_params = None
aux_params = None
if args.retrain:
_, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.model_load_epoch)
if args.memonger:
import memonger
symbol = memonger.search_plan(symbol, data=(args.batch_size, 3, 32, 32) if args.data_type=="cifar10"
else (args.batch_size, 3, 224, 224))
train = mx.io.ImageRecordIter(
path_imgrec = os.path.join(args.data_dir, "cifar10_train.rec") if args.data_type == 'cifar10' else
os.path.join(args.data_dir, "train_256_q90.rec") if args.aug_level == 1
else os.path.join(args.data_dir, "train_480_q90.rec"),
label_width = 1,
data_name = 'data',
label_name = 'softmax_label',
data_shape = (3, 32, 32) if args.data_type=="cifar10" else (3, 224, 224),
batch_size = args.batch_size,
pad = 4 if args.data_type == "cifar10" else 0,
fill_value = 127, # only used when pad is valid
rand_crop = True,
        max_random_scale = 1.0, # 480 with imagenet, 32 with cifar10
min_random_scale = 1.0 if args.data_type == "cifar10" else 1.0 if args.aug_level == 1 else 0.533, # 256.0/480.0
max_aspect_ratio = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 0.25,
random_h = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 36, # 0.4*90
random_s = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50, # 0.4*127
random_l = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50, # 0.4*127
max_rotate_angle = 0 if args.aug_level <= 2 else 10,
max_shear_ratio = 0 if args.aug_level <= 2 else 0.1,
rand_mirror = True,
shuffle = True,
num_parts = kv.num_workers,
part_index = kv.rank)
val = mx.io.ImageRecordIter(
path_imgrec = os.path.join(args.data_dir, "cifar10_val.rec") if args.data_type == 'cifar10' else
os.path.join(args.data_dir, "val_256_q90.rec"),
label_width = 1,
data_name = 'data',
label_name = 'softmax_label',
batch_size = args.batch_size,
data_shape = (3, 32, 32) if args.data_type=="cifar10" else (3, 224, 224),
rand_crop = False,
rand_mirror = False,
num_parts = kv.num_workers,
part_index = kv.rank)
model = mx.model.FeedForward(
ctx = devs,
symbol = symbol,
arg_params = arg_params,
aux_params = aux_params,
num_epoch = 200 if args.data_type == "cifar10" else 120,
begin_epoch = begin_epoch,
learning_rate = args.lr,
momentum = args.mom,
wd = args.wd,
optimizer = 'nag',
# optimizer = 'sgd',
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
lr_scheduler = multi_factor_scheduler(begin_epoch, epoch_size, step=[120, 160], factor=0.1)
if args.data_type=='cifar10' else
multi_factor_scheduler(begin_epoch, epoch_size, step=[30, 60, 90], factor=0.1),
)
model.fit(
X = train,
eval_data = val,
eval_metric = ['acc', 'ce'] if args.data_type=='cifar10' else
['acc', mx.metric.create('top_k_accuracy', top_k = 5)],
kvstore = kv,
batch_end_callback = mx.callback.Speedometer(args.batch_size, args.frequent),
epoch_end_callback = checkpoint)
# logging.info("top-1 and top-5 acc is {}".format(model.score(X = val,
# eval_metric = ['acc', mx.metric.create('top_k_accuracy', top_k = 5)])))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="command for training resnet-v2")
    parser.add_argument('--gpus', type=str, default='0', help='the gpus to be used, e.g. "0,1,2,3"')
parser.add_argument('--data-dir', type=str, default='./data/imagenet/', help='the input data directory')
parser.add_argument('--data-type', type=str, default='imagenet', help='the dataset type')
parser.add_argument('--list-dir', type=str, default='./',
                        help='the directory which contains the training list file')
    parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--mom', type=float, default=0.9, help='momentum for sgd')
    parser.add_argument('--bn-mom', type=float, default=0.9, help='momentum for batch normalization')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay for sgd')
parser.add_argument('--batch-size', type=int, default=256, help='the batch size')
    parser.add_argument('--workspace', type=int, default=512, help='memory space size (MB) used in convolution; if xpu '
                        'memory is OOM, try a smaller value, such as --workspace 256')
parser.add_argument('--depth', type=int, default=50, help='the depth of resnet')
parser.add_argument('--num-classes', type=int, default=1000, help='the class number of your task')
parser.add_argument('--aug-level', type=int, default=2, choices=[1, 2, 3],
help='level 1: use only random crop and random mirror\n'
'level 2: add scale/aspect/hsv augmentation based on level 1\n'
'level 3: add rotation/shear augmentation based on level 2')
parser.add_argument('--num-examples', type=int, default=1281167, help='the number of training examples')
parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
    parser.add_argument('--model-load-epoch', type=int, default=0,
                        help='epoch of the saved checkpoint to load (the prefix is derived from data type and depth)')
parser.add_argument('--frequent', type=int, default=50, help='frequency of logging')
parser.add_argument('--memonger', action='store_true', default=False,
                        help='true means using memonger to save memory, https://github.com/dmlc/mxnet-memonger')
parser.add_argument('--retrain', action='store_true', default=False, help='true means continue training')
args = parser.parse_args()
logging.info(args)
main()
|
tornadomeet/ResNet
|
train_resnet.py
|
Python
|
apache-2.0
| 9,917
|
[
"Gaussian"
] |
efd481919735e1df51f57dd74997e550e34c5f0def8af38da8fed7aad3f07ea2
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import pylab as pl
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
pl.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
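# Note on the two generators above: if z is standard normal, x = z.dot(C) has
# covariance C.T.dot(C), so dataset_fixed_cov() draws both classes from the
# same correlated Gaussian (only the means differ), while dataset_cov() uses
# C for one class and C.T for the other, giving different covariances. A
# doctest-style sanity check (sample estimates, hence the loose tolerance):
#
# >>> X, y = dataset_fixed_cov()
# >>> np.allclose(np.cov(X[y == 0].T), np.cov(X[y == 1].T), atol=0.3)
# True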
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = pl.subplot(2, 2, fig_index)
if fig_index == 1:
pl.title('Linear Discriminant Analysis')
pl.ylabel('Data with fixed covariance')
elif fig_index == 2:
pl.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
pl.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
pl.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
pl.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
pl.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
pl.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = pl.xlim()
y_min, y_max = pl.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
pl.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
pl.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
pl.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
pl.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA()
y_pred = lda.fit(X, y, store_covariance=True).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
pl.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
pl.axis('tight')
pl.suptitle('LDA vs QDA')
pl.show()
|
jmargeta/scikit-learn
|
examples/plot_lda_qda.py
|
Python
|
bsd-3-clause
| 4,758
|
[
"Gaussian"
] |
4d45de854d002ce2ae99f4a3e49215cc448e9b6079a2115af534f0f2e944fb93
|
import argparse
import os
import sys
import textwrap
from collections import defaultdict
from xlrd import XLRDError
import pandas as pd
from jinja2 import Environment, FileSystemLoader
import consts
encoding = sys.getfilesystemencoding()
EXEC_DIR = os.path.dirname(unicode(__file__, encoding))
def save_or_print_json(json_str, outdir, json_name):
if outdir:
with open("%s/%s.json" % (outdir, json_name), 'w') as cout:
cout.writelines(json_str)
else:
print "#%s.json" % json_name
print json_str
def is_false(v):
return str(v).lower() in ("", "no", "false", "f", "0")
class MetadataParser(object):
def __init__(self, args):
self.experiment_type = args.data_type
self.nthreads = args.nthreads
self.mem = args.mem
self.file_path = args.meta_file
self.records = self.load_file()
self.separate_jsons = args.separate_jsons
self.default_adapters = args.default_adapters
self.genome_effective_size = args.genome_effective_size
self.genome_ref_first_index = args.genome_ref_first_index
self.genome_sizes_file = args.genome_sizes_file
self.genome_fasta_files = args.genome_fasta_files
self.annotation_file = args.annotation_file
self.rsem_dir = args.rsem_dir
self.encode_blacklist_bedfile = args.encode_blacklist_bedfile
self.trimmomatic_jar = args.trimmomatic_jar
self.picard_jar = args.picard_jar
self.as_narrowPeak = args.as_narrowPeak
self.as_broadPeak = args.as_broadPeak
self.star_genome_dir = args.star_genome_dir
self.bamtools_forward_filter = args.bamtools_forward_filter
self.bamtools_reverse_filter = args.bamtools_reverse_filter
self.preserve_arguments = args.preserve_arguments
self.read_length = args.read_length
self.fastq_gzipped = args.fastq_gzipped
        # Needed to allow certain configuration values to be overridden later
self.purge_undef_args()
def purge_undef_args(self):
for empty_key in [k for k, v in self.__dict__.iteritems() if v is None]:
del self.__dict__[empty_key]
def render_json(self, wf_conf, samples_list, data_dir, template_name):
pass
def parse_metadata(self, data_dir):
pass
def load_file(self):
try:
rows = pd.read_excel(self.file_path,
true_values=['Yes', 'Y', 'yes', 'y', 1],
false_values=['No', 'N', 'no', 'n', 0])
except XLRDError:
rows = pd.read_csv(self.file_path,
true_values=['Yes', 'Y', 'yes', 'y', '1'],
false_values=['No', 'N', 'no', 'n', '0'], sep='\t',
encoding = 'utf-8')
named_cols = [c for c in rows.columns if not c.startswith('unnamed: ')]
rows = rows.loc[:, named_cols]
rows.columns = [c.lower() for c in rows.columns]
return rows
def update_paths(self, ref_data_obj):
options = ref_data_obj.__dict__.iteritems()
if self.preserve_arguments:
options = {(k, v) for k, v in ref_data_obj.__dict__.iteritems() if k not in self.__dict__}
self.__dict__.update(options)
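    # Behaviour sketch for update_paths (hypothetical values, shown
    # doctest-style; `ref` stands for a consts.ReferenceDataset):
    #
    # >>> self.genome_sizes_file = '/cli/hg38.sizes'      # set on the CLI
    # >>> ref.genome_sizes_file = '/defaults/hg38.sizes'  # from consts.py
    # >>> self.update_paths(ref)
    # With --preserve-arguments the CLI value '/cli/hg38.sizes' survives;
    # without it, the ReferenceDataset value overwrites it.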
def generateMetadataParser(args):
return MetadataParser(args)
class MetadataParserChipseq(object):
def __init__(self, **kwargs):
self.obj = generateMetadataParser(kwargs['args_obj'])
def __getattr__(self, attr):
return getattr(self.obj, attr)
def render_json(self, wf_conf, samples_list, data_dir, template_name):
env = Environment(extensions=["jinja2.ext.do"], loader=FileSystemLoader(os.path.join(EXEC_DIR, "templates")))
template = env.get_template(template_name + '.j2')
json_str = template.render({'wf_conf': wf_conf,
'samples_list': samples_list,
'data_dir': data_dir,
'conf_args': self
})
json_str = '\n'.join([l for l in json_str.split('\n') if l.strip() != '']) # Remove empty lines
return json_str
def parse_metadata(self, data_dir):
samples_dict = defaultdict(list)
wf_conf_dict = {}
for r_ix, r in self.records.iterrows():
read_type = r['paired-end or single-end'].lower()
sample_info = {'treatment': r['name']}
wf_key = '-'.join([read_type])
if 'control' in r.keys() and r['control'] and type(r['control']) != float: # After reading this metadata info, this will contain a nan (float) if undetermined
sample_info['control'] = r['control']
wf_key += '-with-control'
wf_conf_dict[wf_key] = {'rt': read_type,
'st': sample_info.keys()}
genome = consts.GENOME # Default genome
if 'genome' in r.keys():
genome = r['genome']
samples_dict[wf_key].append([sample_info, genome])
for wf_key, samples_genomes in samples_dict.iteritems():
if self.obj.separate_jsons:
for si, s in enumerate(sorted(samples_genomes)):
sample, genome = s[0], s[1]
ref_dataset = consts.ReferenceDataset(genome)
self.update_paths(ref_dataset)
yield self.render_json(wf_conf_dict[wf_key], [sample], data_dir, self.experiment_type), wf_key, si
else:
samples_list, genomes_list = zip(*samples_genomes)
if len(set(genomes_list)) > 1:
raise Exception('More than one genome specified (%s). Please create a different metadata file'
' per genome or provide a sjdb and specify the --separate-jsons argument' %
', '.join(set(genomes_list)))
ref_dataset = consts.ReferenceDataset(genomes_list[0])
self.update_paths(ref_dataset)
yield self.render_json(wf_conf_dict[wf_key], sorted(samples_list), data_dir, self.experiment_type), wf_key, None
class MetadataParserAtacseq(object):
def __init__(self, **kwargs):
self.obj = generateMetadataParser(kwargs['args_obj'])
def __getattr__(self, attr):
return getattr(self.obj, attr)
def render_json(self, wf_conf, samples_list, data_dir, template_name):
env = Environment(extensions=["jinja2.ext.do"], loader=FileSystemLoader(os.path.join(EXEC_DIR, "templates")))
template = env.get_template(template_name + '.j2')
json_str = template.render({'wf_conf': wf_conf,
'samples_list': samples_list,
'data_dir': data_dir,
'nthreads': self.nthreads,
'conf_args': self
})
json_str = '\n'.join([l for l in json_str.split('\n') if l.strip() != '']) # Remove empty lines
return json_str
def parse_metadata(self, data_dir):
samples_dict = defaultdict(list)
wf_conf_dict = {}
for r_ix, r in self.records.iterrows():
read_type = r['paired-end or single-end'].lower()
sample_info = {'treatment': r['name']}
wf_key = '-'.join([read_type])
genome = consts.GENOME # Default genome
if 'genome' in r.keys():
genome = r['genome']
if not ('blacklist removal' in r.keys() and is_false(r['blacklist removal'])):
wf_key += '-blacklist-removal'
wf_conf_dict[wf_key] = {'rt': read_type}
samples_dict[wf_key].append([sample_info, genome])
for wf_key, samples_genomes in samples_dict.iteritems():
if self.obj.separate_jsons:
for si, s in enumerate(sorted(samples_genomes)):
sample, genome = s[0], s[1]
ref_dataset = consts.ReferenceDataset(genome)
if 'blacklist-removal' not in wf_key:
ref_dataset.encode_blacklist_bedfile = None
self.update_paths(ref_dataset)
yield self.render_json(wf_conf_dict[wf_key], [sample], data_dir, self.experiment_type), wf_key, si
else:
samples_list = [s[0] for s in samples_genomes]
genomes_list = [g[1] for g in samples_genomes]
if len(set(genomes_list)) > 1:
raise Exception('More than one genome specified (%s). Please create a different metadata file'
' per genome or provide a sjdb and specify the --separate-jsons argument' %
', '.join(set(genomes_list)))
ref_dataset = consts.ReferenceDataset(genomes_list[0])
if 'blacklist-removal' not in wf_key:
ref_dataset.encode_blacklist_bedfile = None
self.update_paths(ref_dataset)
yield self.render_json(wf_conf_dict[wf_key], sorted(samples_list), data_dir, self.experiment_type), wf_key, None
class MetadataParserRnaseq(object):
def __init__(self, **kwargs):
self.obj = generateMetadataParser(kwargs['args_obj'])
self.skip_star_2pass = kwargs['args_obj'].skip_star_2pass
def __getattr__(self, attr):
return getattr(self.obj, attr)
def render_json(self, wf_conf, samples_list, data_dir):
env = Environment(extensions=["jinja2.ext.do"], loader=FileSystemLoader(os.path.join(EXEC_DIR, "templates")))
template = env.get_template(self.experiment_type + '.j2')
json_str = template.render({'wf_conf': wf_conf,
'samples_list': samples_list,
'data_dir': data_dir,
'nthreads': self.nthreads,
'conf_args': self
})
json_str = '\n'.join([l for l in json_str.split('\n') if l.strip() != '']) # Remove empty lines
return json_str
def parse_metadata(self, data_dir):
samples_dict = defaultdict(list)
wf_conf_dict = {}
for rix, r in self.records.iterrows():
read_type = r['paired-end or single-end'].lower()
sample_name = r['name']
strand_specific = r['strand specificity']
genome = consts.GENOME # Default genome
if 'genome' in r.keys():
genome = r['genome']
ercc_spikein = False
if 'with ercc spike-in' in r.keys():
ercc_spikein = r['with ercc spike-in']
kws = [read_type, strand_specific]
if self.skip_star_2pass:
kws.append('with-sjdb')
wf_key = '-'.join(kws)
wf_conf_dict[wf_key] = {'rt': read_type, 'sn': sample_name}
read_length = self.read_length
if 'read length' in r.keys():
read_length = int(r['read length'])
samples_dict[wf_key].append([sample_name, genome, ercc_spikein, read_length])
for wf_key, samples_genomes in samples_dict.iteritems():
if self.obj.separate_jsons:
for si, s in enumerate(sorted(samples_genomes)):
sample, genome, ercc_spikein, read_length = s
ref_dataset = consts.ReferenceDataset(genome,
read_length=read_length,
with_ercc=ercc_spikein)
self.update_paths(ref_dataset)
yield self.render_json(wf_conf_dict[wf_key], [sample], data_dir), wf_key, si
else:
samples_list = [s[0] for s in samples_genomes]
genomes_list = [g[1] for g in samples_genomes]
ercc_list = [e[2] for e in samples_genomes]
read_length_list = [l[3] for l in samples_genomes]
if len(set(genomes_list)) > 1:
raise Exception(
'More than one genome specified (%s). Please create a different metadata file'
' per genome or provide a sjdb and specify the --separate-jsons argument' %
', '.join(set(genomes_list)))
if len(set(ercc_list)) > 1:
raise Exception(
'With and without ERCC spike-in specified. Please create a different metadata file'
' per ERCC choice or provide a sjdb and specify the --separate-jsons argument')
if len(set(read_length_list)) > 1:
raise Exception(
'More than one read length specified. Please create a different metadata file'
' per read length choice or provide a sjdb and specify the --separate-jsons argument')
ref_dataset = consts.ReferenceDataset(genomes_list[0],
read_length=read_length_list[0],
with_ercc=ercc_list[0])
self.update_paths(ref_dataset)
yield self.render_json(wf_conf_dict[wf_key], sorted(samples_list), data_dir), wf_key, None
class MetadataParserStarrseq(object):
def __init__(self, **kwargs):
self.obj = generateMetadataParser(kwargs['args_obj'])
def __getattr__(self, attr):
return getattr(self.obj, attr)
def render_json(self, wf_conf, samples_list, data_dir):
env = Environment(extensions=["jinja2.ext.do"], loader=FileSystemLoader(os.path.join(EXEC_DIR, "templates")))
template = env.get_template(self.experiment_type + '.j2')
json_str = template.render({'wf_conf': wf_conf,
'samples_list': samples_list,
'data_dir': data_dir,
'nthreads': self.nthreads,
'conf_args': self
})
json_str = '\n'.join([l for l in json_str.split('\n') if l.strip() != '']) # Remove empty lines
return json_str
def parse_metadata(self, data_dir):
samples_dict = defaultdict(list)
wf_conf_dict = {}
for rix, r in self.records.iterrows():
read_type = r['paired-end or single-end'].lower()
sample_name = r['name']
genome = consts.GENOME # Default genome
if 'genome' in r.keys():
genome = r['genome']
kws = [read_type]
wf_key = '-'.join(kws)
with_umis = 'umis' in r.keys() and not is_false(r['umis'])
if with_umis:
wf_key += '-umis'
wf_conf_dict[wf_key] = {'rt': read_type, 'sn': sample_name, 'umis': with_umis}
samples_dict[wf_key].append([sample_name, genome])
for wf_key, samples_genomes in samples_dict.iteritems():
if self.obj.separate_jsons:
for si, s in enumerate(sorted(samples_genomes)):
sample, genome = s[0], s[1]
ref_dataset = consts.ReferenceDataset(genome,
read_length=self.read_length,
umis='umis' in wf_key)
self.update_paths(ref_dataset)
yield self.render_json(wf_conf_dict[wf_key], [sample], data_dir), wf_key, si
else:
samples_list = [s[0] for s in samples_genomes]
genomes_list = [g[1] for g in samples_genomes]
if len(set(genomes_list)) > 1:
raise Exception(
'More than one genome specified (%s). Please create a different metadata file'
' per genome or provide a sjdb and specify the --separate-jsons argument' %
', '.join(set(genomes_list)))
ref_dataset = consts.ReferenceDataset(genomes_list[0],
read_length=self.read_length,
umis='umis' in wf_key)
self.update_paths(ref_dataset)
yield self.render_json(wf_conf_dict[wf_key], sorted(samples_list), data_dir), wf_key, None
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Parse metadata and create workflow JSONs using templates
--------------------------------------------------------
'''))
#############################
# Base script options
parser.add_argument('-o', '--outdir', metavar='output_dir', required=False, dest='outdir', type=str,
help='Output directory where the files will be placed.')
parser.add_argument('-m', '--metadata-file', dest='meta_file', required=True,
help='Metadata file used to create the JSON config files. By convention, the file name should '
'start with the type of experiment, for example *chip-seq* or *chip_seq*. '
                             'Internally, this name is used to identify the target template.')
parser.add_argument('-d', '--data-dir', dest='data_dir', required=True,
help='Project directory containing the fastq data files.')
parser.add_argument('-t', '--metadata-type', dest='data_type', choices=['chip-seq', 'rna-seq', 'atac-seq', 'starr-seq'],
default='chip-seq', help='Experiment type for the metadata.')
parser.add_argument('--nthreads', type=int, dest='nthreads', default=consts.CPUS, help='Number of threads.')
parser.add_argument('--mem', type=int, dest='mem', default=consts.MEM, help='Memory for Java based CLT.')
parser.add_argument('--separate-jsons', action='store_true', help='Create one JSON per sample in the metadata.')
parser.add_argument('--skip-star-2pass', action='store_true', default=False,
help='''[RNA-seq only]
Skip the STAR 2-pass step and use the genomeDir index for mapping.
                        By default, a STAR 2-pass strategy is implemented to create a splice junctions
                        file that is then used to build a new STAR genome.''')
parser.add_argument('--preserve-arguments', action='store_true', default=False,
help='''Preserve options specified in the command line over default values stored in consts.py''')
#############################
# Overwrittable default paths
parser.add_argument('--read-length', help='Read length of the sequenced FASTQ files.',
type=int, default=consts.READ_LENGTH)
    parser.add_argument('--default-adapters', help='File with adapter sequences to be trimmed out.')
parser.add_argument('--genome-ref-first-index', help='[non RNA-seq only] First index file of the Bowtie reference genome.')
parser.add_argument('--genome-sizes-file', help='Chromosome sizes file')
parser.add_argument('--encode-blacklist-bedfile', help='ENCODE blacklist bedfile to mask out un-mappable regions')
parser.add_argument('--genome-effective-size', help='Genome effective or mappable size. Used in some deeptools commands')
parser.add_argument('--star-genome-dir', help='[RNA-seq only] Directory containing the STAR Genome files (indices).')
parser.add_argument('--annotation-file', help='[RNA-seq only] Gene annotation GTF file')
parser.add_argument('--genome-fasta-files', nargs="*", help='[RNA-seq only] Genome FASTA file')
parser.add_argument('--rsem-dir',
help='[RNA-seq only] RSEM reference/index directory (all index files should be included in this directory)')
parser.add_argument('--as-narrowPeak', default=consts.as_narrowPeak,
help='AutoSQL file defining non-standard fields for narrowPeak files '
'(formats available in https://github.com/ucscGenomeBrowser/kent/tree/master/src/hg/lib/encode)')
parser.add_argument('--as-broadPeak', default=consts.as_broadPeak,
help='AutoSQL file defining non-standard fields for broadPeak files '
'(formats available in https://github.com/ucscGenomeBrowser/kent/tree/master/src/hg/lib/encode)')
parser.add_argument('--bamtools-forward-filter', default=consts.bamtools_forward_filter,
help='Rules for forward reads used in Bamtools')
parser.add_argument('--bamtools-reverse-filter', default=consts.bamtools_reverse_filter,
help='Rules for reverse reads used in Bamtools')
parser.add_argument('--trimmomatic-jar', default=consts.trimmomatic_jar,
help='Trimmomatic JAVA jar file')
parser.add_argument('--picard-jar', default=consts.picard_jar,
help='Picard JAVA jar file')
parser.add_argument('--fastq-gzipped', action='store_true', default=False,
help='FASTQ files are gzipped, with .fastq.gz extensions')
# Parse input
args = parser.parse_args()
    if args.outdir and os.path.isfile(args.outdir):
        print "[ERROR] :: Target output directory is an existing file."
        sys.exit(1)
if args.outdir and not os.path.exists(args.outdir):
os.mkdir(args.outdir)
if args.data_type == 'chip-seq':
meta_parser = MetadataParserChipseq(args_obj=args)
elif args.data_type == 'rna-seq':
meta_parser = MetadataParserRnaseq(args_obj=args)
elif args.data_type == 'atac-seq':
meta_parser = MetadataParserAtacseq(args_obj=args)
elif args.data_type == 'starr-seq':
meta_parser = MetadataParserStarrseq(args_obj=args)
else:
raise Exception('Unrecognized Experiment Type: %s' % args.data_type)
file_basename = os.path.splitext(os.path.basename(args.meta_file))[0]
for json_str, conf_name, idx in meta_parser.parse_metadata(args.data_dir.rstrip('/')):
if args.separate_jsons:
conf_name += '-%d' % idx
save_or_print_json(json_str, args.outdir, file_basename + '-' + conf_name)
if __name__ == '__main__':
main()
|
Duke-GCB/GGR-cwl
|
v1.0/json-generator/run.py
|
Python
|
mit
| 22,633
|
[
"Bowtie"
] |
193b84ced1fc15d02b33343445417776f57c2975927292e191890f0123b32496
|
"""
This script adds a new user_address table that is currently only used with sample requests, where
a user can select from a list of his addresses to associate with the request. This script also
drops the request.submitted column which was boolean and replaces it with a request.state column
which is a string, allowing for more flexibility with request states.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.exc import *
from galaxy.model.custom_types import *
from migrate import *
from migrate.changeset import *
import datetime
now = datetime.datetime.utcnow
import sys, logging
# Need our custom types, but don't import anything else from model
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
def display_migration_details():
print "========================================"
print "This script adds a new user_address table that is currently only used with sample requests, where"
print "a user can select from a list of his addresses to associate with the request. This script also"
print "drops the request.submitted column which was boolean and replaces it with a request.state column"
print "which is a string, allowing for more flexibility with request states."
print "========================================"
UserAddress_table = Table( "user_address", metadata,
Column( "id", Integer, primary_key=True),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
Column( "desc", TEXT),
Column( "name", TrimmedString( 255 ), nullable=False),
Column( "institution", TrimmedString( 255 )),
Column( "address", TrimmedString( 255 ), nullable=False),
Column( "city", TrimmedString( 255 ), nullable=False),
Column( "state", TrimmedString( 255 ), nullable=False),
Column( "postal_code", TrimmedString( 255 ), nullable=False),
Column( "country", TrimmedString( 255 ), nullable=False),
Column( "phone", TrimmedString( 255 )),
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ) )
def upgrade(migrate_engine):
#raise Exception
metadata.bind = migrate_engine
display_migration_details()
# Load existing tables
metadata.reflect()
# Add all of the new tables above
try:
UserAddress_table.create()
except Exception, e:
log.debug( "Creating user_address table failed: %s" % str( e ) )
# Add 1 column to the request_type table
try:
RequestType_table = Table( "request_type", metadata, autoload=True )
except NoSuchTableError:
RequestType_table = None
log.debug( "Failed loading table request_type" )
if RequestType_table is not None:
try:
col = Column( "deleted", Boolean, index=True, default=False )
col.create( RequestType_table, index_name='ix_request_type_deleted')
assert col is RequestType_table.c.deleted
except Exception, e:
log.debug( "Adding column 'deleted' to request_type table failed: %s" % ( str( e ) ) )
# Delete the submitted column
# This fails for sqlite, so skip the drop -- no conflicts in the future
try:
Request_table = Table( "request", metadata, autoload=True )
except NoSuchTableError:
Request_table = None
log.debug( "Failed loading table request" )
if Request_table is not None:
if migrate_engine.name != 'sqlite':
#DBTODO drop from table doesn't work in sqlite w/ sqlalchemy-migrate .6+
Request_table.c.submitted.drop()
col = Column( "state", TrimmedString( 255 ), index=True )
col.create( Request_table, index_name='ix_request_state')
assert col is Request_table.c.state
def downgrade(migrate_engine):
metadata.bind = migrate_engine
pass
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0012_user_address.py
|
Python
|
gpl-3.0
| 4,173
|
[
"Galaxy"
] |
389b1325470a5f7533429f1943383a52e0caf2eecf2c8b19aff4ad6c75a62d84
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class Eccodes(CMakePackage):
"""ecCodes is a package developed by ECMWF for processing meteorological
data in GRIB (1/2), BUFR (3/4) and GTS header formats."""
homepage = 'https://software.ecmwf.int/wiki/display/ECC/ecCodes+Home'
url = 'https://software.ecmwf.int/wiki/download/attachments/45757960/eccodes-2.2.0-Source.tar.gz?api=v2'
list_url = 'https://software.ecmwf.int/wiki/display/ECC/Releases'
maintainers = ['skosukhin']
version('2.5.0', '5a7e92c58418d855082fa573efd352aa')
version('2.2.0', 'b27e6f0a3eea5b92dac37372e4c45a62')
variant('netcdf', default=False,
description='Enable GRIB to NetCDF conversion tool')
variant('jp2k', default='openjpeg', values=('openjpeg', 'jasper', 'none'),
description='Specify JPEG2000 decoding/encoding backend')
variant('png', default=False,
description='Enable PNG support for decoding/encoding')
variant('aec', default=False,
description='Enable Adaptive Entropy Coding for decoding/encoding')
variant('pthreads', default=False,
description='Enable POSIX threads')
variant('openmp', default=False,
description='Enable OpenMP threads')
variant('memfs', default=False,
description='Enable memory based access to definitions/samples')
variant('python', default=False,
description='Enable the Python interface')
variant('fortran', default=False, description='Enable the Fortran support')
variant('examples', default=True,
description='Build the examples (part of the full test suite)')
variant('test', default=True, description='Enable the tests')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo', 'Production'))
# The building script tries to find an optional package valgrind when
# tests are enabled but the testing scripts don't use it.
# depends_on('valgrind', type='test', when='+test')
depends_on('netcdf', when='+netcdf')
depends_on('openjpeg@1.5.0:1.5.999,2.1.0:2.1.999', when='jp2k=openjpeg')
depends_on('jasper', when='jp2k=jasper')
depends_on('libpng', when='+png')
depends_on('libaec', when='+aec')
# Can be built with Python2 or Python3.
depends_on('python', when='+memfs', type='build')
# The interface works only for Python2.
depends_on('python@2.6:2.999', when='+python',
type=('build', 'link', 'run'))
depends_on('py-numpy', when='+python', type=('build', 'run'))
extends('python', when='+python')
conflicts('+openmp', when='+pthreads',
msg='Cannot enable both POSIX threads and OMP')
# The following enforces linking against the specified JPEG2000 backend.
patch('enable_only_openjpeg.patch', when='jp2k=openjpeg')
patch('enable_only_jasper.patch', when='jp2k=jasper')
# CMAKE_INSTALL_RPATH must be a semicolon-separated list.
patch('cmake_install_rpath.patch')
@run_before('cmake')
def check_fortran(self):
if '+fortran' in self.spec and self.compiler.fc is None:
raise InstallError(
'Fortran interface requires a Fortran compiler!')
def cmake_args(self):
var_opt_list = [('+pthreads', 'ECCODES_THREADS'),
('+openmp', 'ECCODES_OMP_THREADS'),
('+memfs', 'MEMFS'),
('+python', 'PYTHON'),
('+fortran', 'FORTRAN'),
('+examples', 'EXAMPLES'),
('+test', 'TESTS'),
('+test', 'EXTRA_TESTS')]
args = ['-DENABLE_%s=%s' % (opt, 'ON' if var in self.spec else 'OFF')
for var, opt in var_opt_list]
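        # Illustration of the mapping above (hypothetical spec, not a real
        # build): 'eccodes+pthreads+fortran~openmp~memfs~python~examples~test'
        # would yield
        #   ['-DENABLE_ECCODES_THREADS=ON', '-DENABLE_ECCODES_OMP_THREADS=OFF',
        #    '-DENABLE_MEMFS=OFF', '-DENABLE_PYTHON=OFF',
        #    '-DENABLE_FORTRAN=ON', '-DENABLE_EXAMPLES=OFF',
        #    '-DENABLE_TESTS=OFF', '-DENABLE_EXTRA_TESTS=OFF']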
if '+netcdf' in self.spec:
args.extend(['-DENABLE_NETCDF=ON',
# Prevent overriding by environment variable
# HDF5_ROOT.
'-DHDF5_ROOT=' + self.spec['hdf5'].prefix,
# Prevent possible overriding by environment variables
# NETCDF_ROOT, NETCDF_DIR, and NETCDF_PATH.
'-DNETCDF_PATH=' + self.spec['netcdf'].prefix])
else:
args.append('-DENABLE_NETCDF=OFF')
if self.spec.variants['jp2k'].value == 'none':
args.append('-DENABLE_JPG=OFF')
else:
args.append('-DENABLE_JPG=ON')
if self.spec.variants['jp2k'].value == 'openjpeg':
args.append('-DOPENJPEG_PATH=' + self.spec['openjpeg'].prefix)
if '+png' in self.spec:
args.extend(['-DENABLE_PNG=ON',
'-DZLIB_ROOT=' + self.spec['zlib'].prefix])
else:
args.append('-DENABLE_PNG=OFF')
if '+aec' in self.spec:
args.extend(['-DENABLE_AEC=ON',
# Prevent overriding by environment variables
# AEC_DIR and AEC_PATH.
'-DAEC_DIR=' + self.spec['libaec'].prefix])
else:
args.append('-DENABLE_AEC=OFF')
if '^python' in self.spec:
args.append('-DPYTHON_EXECUTABLE:FILEPATH=' + python.path)
return args
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/eccodes/package.py
|
Python
|
lgpl-2.1
| 6,557
|
[
"NetCDF"
] |
c967aace12acabd7137d354d16b30655e1d59b5e0b0fa307926fd37ca7a98382
|
""" VOBOXAvailabilityCommand module
"""
# FIXME: NOT Usable ATM
# missing doNew, doCache, doMaster
from urllib import parse
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.Client import Client
from DIRAC.ResourceStatusSystem.Command.Command import Command
class VOBOXAvailabilityCommand(Command):
"""
Given an url pointing to a service on a vobox, use DIRAC ping against it.
"""
def doCommand(self):
"""
The Command pings a service on a vobox, it needs a service URL to ping it.
:returns: a dict with the following:
.. code-block:: python
{
'serviceUpTime' : <serviceUpTime>,
'machineUpTime' : <machineUpTime>,
'site' : <site>,
'system' : <system>,
'service' : <service>
}
"""
# INPUT PARAMETERS
if "serviceURL" not in self.args:
return self.returnERROR(S_ERROR('"serviceURL" not found in self.args'))
serviceURL = self.args["serviceURL"]
##
parsed = parse.urlparse(serviceURL)
site = parsed[1].split(":")[0]
try:
system, service = parsed[2].strip("/").split("/")
except ValueError:
return self.returnERROR(S_ERROR('"%s" seems to be a malformed url' % serviceURL))
pinger = Client(url=serviceURL)
resPing = pinger.ping()
if not resPing["OK"]:
return self.returnERROR(resPing)
serviceUpTime = resPing["Value"].get("service uptime", 0)
machineUpTime = resPing["Value"].get("host uptime", 0)
result = {
"site": site,
"system": system,
"service": service,
"serviceUpTime": serviceUpTime,
"machineUpTime": machineUpTime,
}
return S_OK(result)
# FIXME: how do we get the values !!
|
DIRACGrid/DIRAC
|
src/DIRAC/ResourceStatusSystem/Command/VOBOXAvailabilityCommand.py
|
Python
|
gpl-3.0
| 1,918
|
[
"DIRAC"
] |
04d41da4abbfeac982b9e468551895d8cc1e3a8475dc903e7811befba8b3b0ea
|
from django.conf import settings
from django.contrib.auth.signals import user_logged_in
from django.core import signing
from django.utils.crypto import constant_time_compare
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext
import jwt
from rest_framework import exceptions
from rest_framework.authentication import (
BaseAuthentication, get_authorization_header)
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
import olympia.core.logger
from olympia import core
from olympia.api import jwt_auth
from olympia.api.models import APIKey
from olympia.users.models import UserProfile
from olympia.users.utils import UnsubscribeCode
log = olympia.core.logger.getLogger('z.api.authentication')
class WebTokenAuthentication(BaseAuthentication):
"""
DRF authentication class for our internal auth API tokens (i.e. not
external clients using API keys - see JWTKeyAuthentication for that).
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the "Bearer" prefix. For example:
Authorization: Bearer eyJhbGciOiAiSFMyNTYiLCAidHlwIj
"""
www_authenticate_realm = 'api'
auth_header_prefix = 'Bearer'
salt = 'olympia.api.auth'
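    # A minimal sketch of how a compatible token could be minted (assumes
    # Django's signing module and the payload shape checked in
    # authenticate_credentials below; illustrative only, not the production
    # token-issuing code):
    #
    # >>> from django.core import signing
    # >>> token = signing.dumps(
    # ...     {'user_id': user.pk, 'auth_hash': user.get_session_auth_hash()},
    # ...     salt='olympia.api.auth')
    # >>> # the client then sends: Authorization: Bearer <token>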
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
return '{0} realm="{1}"'.format(
self.auth_header_prefix.lower(), self.www_authenticate_realm)
def get_token_value(self, request):
auth_header = get_authorization_header(request).split()
expected_header_prefix = self.auth_header_prefix.lower()
if not auth_header or (
smart_text(auth_header[0].lower()) != expected_header_prefix):
return None
if len(auth_header) == 1:
msg = {
'detail': ugettext('Invalid Authorization header. '
'No credentials provided.'),
'code': 'ERROR_INVALID_HEADER'
}
raise exceptions.AuthenticationFailed(msg)
elif len(auth_header) > 2:
msg = {
'detail': ugettext('Invalid Authorization header. Credentials '
'string should not contain spaces.'),
'code': 'ERROR_INVALID_HEADER',
}
raise exceptions.AuthenticationFailed(msg)
return auth_header[1]
def authenticate(self, request):
"""
        Returns a two-tuple of `User` and token if a valid token has been
supplied. Otherwise returns `None`.
Raises AuthenticationFailed if a token was specified but it's invalid
in some way (expired signature, invalid token, etc.)
"""
token = self.get_token_value(request)
if token is None:
# No token specified, skip this authentication method.
return None
# Proceed.
return self.authenticate_token(token)
def authenticate_token(self, token):
try:
payload = signing.loads(
force_text(token), salt=self.salt,
max_age=settings.SESSION_COOKIE_AGE or None)
except signing.SignatureExpired:
msg = {
'detail': ugettext('Signature has expired.'),
'code': 'ERROR_SIGNATURE_EXPIRED',
}
raise exceptions.AuthenticationFailed(msg)
except signing.BadSignature:
msg = {
'detail': ugettext('Error decoding signature.'),
'code': 'ERROR_DECODING_SIGNATURE'
}
raise exceptions.AuthenticationFailed(msg)
# We have a valid token, try to find the corresponding user.
user = self.authenticate_credentials(payload)
return (user, token)
def authenticate_credentials(self, payload):
"""
Return a non-deleted user that matches the payload's user id.
Mimic what our UserAndAddrMiddleware and django's get_user() do when
authenticating, because otherwise that behaviour would be missing in
the API since API auth happens after the middleware process request
phase.
"""
if 'user_id' not in payload:
log.info('No user_id in token payload {}'.format(payload))
raise exceptions.AuthenticationFailed()
try:
user = UserProfile.objects.filter(deleted=False).get(
pk=payload['user_id'])
except UserProfile.DoesNotExist:
log.info('User not found from token payload {}'.format(payload))
raise exceptions.AuthenticationFailed()
# Check get_session_auth_hash like django's get_user() does.
session_auth_hash = user.get_session_auth_hash()
payload_auth_hash = payload.get('auth_hash', '')
if not constant_time_compare(payload_auth_hash, session_auth_hash):
            log.info('User tried to authenticate with invalid auth hash in '
                     'payload {}'.format(payload))
raise exceptions.AuthenticationFailed()
# Set user in thread like UserAndAddrMiddleware does.
core.set_user(user)
return user
class JWTKeyAuthentication(JSONWebTokenAuthentication):
"""
DRF authentication class for JWT header auth with API keys.
This extends the django-rest-framework-jwt auth class to get the
shared JWT secret from our APIKey database model. Each user (an add-on
developer) can have one or more API keys. The JWT is issued with their
public ID and is signed with their secret.
**IMPORTANT**
Please note that unlike typical JWT usage, this authenticator only
signs and verifies that the user is who they say they are. It does
not sign and verify the *entire request*. In other words, when you use
this authentication method you cannot prove that the request was made
by the authenticated user.
"""
def authenticate(self, request):
"""
Returns a two-tuple of `User` and token if a valid signature has been
supplied using JWT-based authentication. Otherwise returns `None`.
Copied from rest_framework_jwt BaseJSONWebTokenAuthentication, with
the decode_handler changed to our own - because we don't want that
decoder to be the default one in settings - and logging added.
"""
jwt_value = self.get_jwt_value(request)
if jwt_value is None:
return None
try:
payload = jwt_auth.jwt_decode_handler(jwt_value)
except Exception as exc:
try:
# Log all exceptions
log.info('JWTKeyAuthentication failed; '
'it raised %s (%s)', exc.__class__.__name__, exc)
# Re-raise to deal with them properly.
raise exc
except TypeError:
msg = ugettext('Wrong type for one or more keys in payload')
raise exceptions.AuthenticationFailed(msg)
except jwt.ExpiredSignature:
msg = ugettext('Signature has expired.')
raise exceptions.AuthenticationFailed(msg)
except jwt.DecodeError:
msg = ugettext('Error decoding signature.')
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
msg = ugettext('Invalid JWT Token.')
raise exceptions.AuthenticationFailed(msg)
# Note: AuthenticationFailed can also be raised directly from our
# jwt_decode_handler.
user = self.authenticate_credentials(payload)
        # Send user_logged_in signal when a JWT is used to authenticate a user.
# Otherwise, we'd never update the last_login information for users
# who never visit the site but do use the API to upload new add-ons.
user_logged_in.send(sender=self.__class__, request=request, user=user)
return (user, jwt_value)
def authenticate_credentials(self, payload):
"""
Returns a verified AMO user who is active and allowed to make API
requests.
"""
if 'orig_iat' in payload:
msg = ("API key based tokens are not refreshable, don't include "
"`orig_iat` in their payload.")
raise exceptions.AuthenticationFailed(msg)
try:
api_key = APIKey.get_jwt_key(key=payload['iss'])
except APIKey.DoesNotExist:
msg = 'Invalid API Key.'
raise exceptions.AuthenticationFailed(msg)
if api_key.user.deleted:
msg = 'User account is disabled.'
raise exceptions.AuthenticationFailed(msg)
if not api_key.user.read_dev_agreement:
msg = 'User has not read developer agreement.'
raise exceptions.AuthenticationFailed(msg)
core.set_user(api_key.user)
return api_key.user
def get_jwt_value(self, request):
"""
Get the JWT token from the authorization header.
Copied from upstream's implementation but uses a hardcoded 'JWT'
prefix in order to be isolated from JWT_AUTH_HEADER_PREFIX setting
which is used for the non-api key auth above.
"""
auth = get_authorization_header(request).split()
auth_header_prefix = 'jwt' # JWT_AUTH_HEADER_PREFIX.lower()
if not auth or smart_text(auth[0].lower()) != auth_header_prefix:
return None
if len(auth) == 1:
msg = ugettext('Invalid Authorization header. '
'No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = ugettext('Invalid Authorization header. Credentials string '
'should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
return auth[1]
class UnsubscribeTokenAuthentication(BaseAuthentication):
"""
DRF authentication class for email unsubscribe notifications - token and
hash should be provided in the POST data. ONLY use this authentication for
account notifications.
"""
def authenticate(self, request):
try:
email = UnsubscribeCode.parse(
request.data.get('token'), request.data.get('hash'))
user = UserProfile.objects.get(email=email)
except ValueError:
raise exceptions.AuthenticationFailed(
ugettext('Invalid token or hash.'))
except UserProfile.DoesNotExist:
raise exceptions.AuthenticationFailed(
ugettext('Email address not found.'))
return (user, None)
|
kumar303/addons-server
|
src/olympia/api/authentication.py
|
Python
|
bsd-3-clause
| 10,973
|
[
"VisIt"
] |
cb8493f69027de3ca86e4cee7cf2c47f13232e2261d1b91a94c15572f45ce71a
|
VERSION = '0.1.3.0'
PROGRAM = 'dsame'
DESCRIPTION = 'dsame is a program to decode EAS/SAME alert messages'
COPYRIGHT = 'Copyright (C) 2016 Joseph W. Metcalf'
TEST_STRING = 'EAS: ZCZC-WXR-RWT-055027-055039-055047-055117-055131-055137-055139-055015-055071+0030-0771800-KMKX/NWS-'
MSG__TEXT={
'EN' :
{'MSG1' : '{article} {organization} {preposition} {location} {has} issued a {event} valid until {end}',
'MSG2' : '{conjunction} for the following {division} in {state}: ',
'MSG3' : '{county}{punc} ',
'MSG4' : '',
'AND' : 'and',
'ALL' : 'all',
'HAS' : 'has',
'HAVE' : 'have',
'THE' : 'the',
'A' : 'a',
'IN' : 'in',
'' : '',
}
}
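# Illustration of how the MSG1 template above renders (hypothetical field
# values, as they might be decoded from a header like TEST_STRING):
#
# >>> MSG__TEXT['EN']['MSG1'].format(
# ...     article='The', organization='National Weather Service',
# ...     preposition='in', location='Milwaukee/Sullivan', has='has',
# ...     event='Required Weekly Test', end='6:30 PM')
# 'The National Weather Service in Milwaukee/Sullivan has issued a Required Weekly Test valid until 6:30 PM'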
FIPS_DIVN={
'' : None,
'02' : 'boroughs',
'22' : 'parishes',
'11' : None,
'57' : None,
'58' : None,
'59' : None,
'60' : None,
'61' : None,
'64' : None,
'65' : None,
'66' : None,
'68' : None,
'69' : None,
'70' : None,
'73' : None,
'74' : None,
'75' : None,
'77' : None,
'78' : None,
'91' : None,
'92' : None,
'93' : None,
'94' : None,
'96' : None,
'97' : None,
'98' : None,
}
US_SAME_AREA={
'LOCATION' : 'US',
'01' : 'Alabama',
'02' : 'Alaska',
'04' : 'Arizona',
'05' : 'Arkansas',
'06' : 'California',
'08' : 'Colorado',
'09' : 'Connecticut',
'10' : 'Delaware',
'11' : 'District of Columbia',
'12' : 'Florida',
'13' : 'Georgia',
'15' : 'Hawaii',
'16' : 'Idaho',
'17' : 'Illinois',
'18' : 'Indiana',
'19' : 'Iowa',
'20' : 'Kansas',
'21' : 'Kentucky',
'22' : 'Louisiana',
'23' : 'Maine',
'24' : 'Maryland',
'25' : 'Massachusetts',
'26' : 'Michigan',
'27' : 'Minnesota',
'28' : 'Mississippi',
'29' : 'Missouri',
'30' : 'Montana',
'31' : 'Nebraska',
'32' : 'Nevada',
'33' : 'New Hampshire',
'34' : 'New Jersey',
'35' : 'New Mexico',
'36' : 'New York',
'37' : 'North Carolina',
'38' : 'North Dakota',
'39' : 'Ohio',
'40' : 'Oklahoma',
'41' : 'Oregon',
'42' : 'Pennsylvania',
'44' : 'Rhode Island',
'45' : 'South Carolina',
'46' : 'South Dakota',
'47' : 'Tennessee',
'48' : 'Texas',
'49' : 'Utah',
'50' : 'Vermont',
'51' : 'Virginia',
'53' : 'Washington',
'54' : 'West Virginia',
'55' : 'Wisconsin',
'56' : 'Wyoming',
'57' : 'Pacific Coast from Washington to California',
'58' : 'Alaskan Coast',
'59' : 'Hawaiian Coast',
'60' : 'American Samoa',
'61' : 'American Samoa Waters',
'64' : 'Federated States of Micronesia',
'65' : 'Mariana Islands Waters (including Guam)',
'66' : 'Guam',
'68' : 'Marshall Islands',
'69' : 'Northern Mariana Islands',
'70' : 'Palau',
'72' : 'Puerto Rico',
'73' : 'Atlantic Coast from Maine to Virginia',
'74' : 'U.S. Minor Outlying Islands',
'75' : 'Atlantic Coast from North Carolina to Florida, and the Coasts of Puerto Rico and Virgin Islands',
'77' : 'Gulf of Mexico',
'78' : 'Virgin Islands',
'91' : 'Lake Superior',
'92' : 'Lake Michigan',
'93' : 'Lake Huron',
'94' : 'Saint Clair River, Detroit River, and Lake Saint Clair',
'96' : 'Lake Erie',
'97' : 'Niagara River and Lake Ontario',
'98' : 'Saint Lawrence River',
'XX' : 'TEST',
}
CA_SAME_AREA={
'LOCATION' : 'CA',
'11' : 'Nova Scotia',
'12' : 'Nova Scotia',
'13' : 'Nova Scotia',
'14' : 'Nova Scotia',
'15' : 'New Brunswick',
'16' : 'New Brunswick',
'17' : 'New Brunswick',
'18' : 'Prince Edward Island',
'21' : 'Newfoundland/Labrador',
'22' : 'Newfoundland/Labrador',
'23' : 'Newfoundland/Labrador',
'24' : 'Newfoundland/Labrador',
'25' : 'Newfoundland/Labrador',
'26' : 'Newfoundland/Labrador',
'27' : 'Newfoundland/Labrador',
'28' : 'Newfoundland/Labrador',
'29' : 'Newfoundland/Labrador',
'31' : 'Quebec',
'32' : 'Quebec',
'33' : 'Quebec',
'34' : 'Quebec',
'35' : 'Quebec',
'36' : 'Quebec',
'37' : 'Quebec',
'38' : 'Quebec',
'39' : 'Quebec',
'41' : 'Ontario',
'42' : 'Ontario',
'43' : 'Ontario',
'44' : 'Ontario',
'45' : 'Ontario',
'46' : 'Ontario',
'47' : 'Ontario',
'48' : 'Ontario',
'49' : 'Ontario',
'51' : 'Manitoba',
'52' : 'Manitoba',
'53' : 'Manitoba',
'54' : 'Manitoba',
'55' : 'Manitoba',
'56' : 'Manitoba',
'57' : 'Manitoba',
'58' : 'Manitoba',
'59' : 'Manitoba',
'61' : 'Saskatchewan',
'62' : 'Saskatchewan',
'63' : 'Saskatchewan',
'64' : 'Saskatchewan',
'65' : 'Saskatchewan',
'66' : 'Saskatchewan',
'67' : 'Saskatchewan',
'68' : 'Saskatchewan',
'71' : 'Alberta',
'72' : 'Alberta',
'73' : 'Alberta',
'74' : 'Alberta',
'75' : 'Alberta',
'76' : 'Alberta',
'77' : 'Alberta',
'78' : 'Alberta',
'79' : 'Alberta',
'81' : 'British Columbia',
'82' : 'British Columbia',
'83' : 'British Columbia',
'84' : 'British Columbia',
'85' : 'British Columbia',
'86' : 'British Columbia',
'87' : 'British Columbia',
'88' : 'British Columbia',
'89' : 'British Columbia',
'91' : 'Yukon',
'92' : 'Yukon',
'93' : 'Yukon',
'94' : 'Northwest Territories',
'95' : 'Northwest Territories',
'96' : 'Northwest Territories',
'97' : 'Nunavut',
'98' : 'Nunavut',
'99' : 'Nunavut',
'XX' : 'TEST',
}
US_SAME_CODE={
'01001' : 'Autauga',
'01003' : 'Baldwin',
'01005' : 'Barbour',
'01007' : 'Bibb',
'01009' : 'Blount',
'01011' : 'Bullock',
'01013' : 'Butler',
'01015' : 'Calhoun',
'01017' : 'Chambers',
'01019' : 'Cherokee',
'01021' : 'Chilton',
'01023' : 'Choctaw',
'01025' : 'Clarke',
'01027' : 'Clay',
'01029' : 'Cleburne',
'01031' : 'Coffee',
'01033' : 'Colbert',
'01035' : 'Conecuh',
'01037' : 'Coosa',
'01039' : 'Covington',
'01041' : 'Crenshaw',
'01043' : 'Cullman',
'01045' : 'Dale',
'01047' : 'Dallas',
'01049' : 'Dekalb',
'01051' : 'Elmore',
'01053' : 'Escambia',
'01055' : 'Etowah',
'01057' : 'Fayette',
'01059' : 'Franklin',
'01061' : 'Geneva',
'01063' : 'Greene',
'01065' : 'Hale',
'01067' : 'Henry',
'01069' : 'Houston',
'01071' : 'Jackson',
'01073' : 'Jefferson',
'01075' : 'Lamar',
'01077' : 'Lauderdale',
'01079' : 'Lawrence',
'01081' : 'Lee',
'01083' : 'Limestone',
'01085' : 'Lowndes',
'01087' : 'Macon',
'01089' : 'Madison',
'01091' : 'Marengo',
'01093' : 'Marion',
'01095' : 'Marshall',
'01097' : 'Mobile',
'01099' : 'Monroe',
'01101' : 'Montgomery',
'01103' : 'Morgan',
'01105' : 'Perry',
'01107' : 'Pickens',
'01109' : 'Pike',
'01111' : 'Randolph',
'01113' : 'Russell',
'01115' : 'Saint Clair',
'01117' : 'Shelby',
'01119' : 'Sumter',
'01121' : 'Talladega',
'01123' : 'Tallapoosa',
'01125' : 'Tuscaloosa',
'01127' : 'Walker',
'01129' : 'Washington',
'01131' : 'Wilcox',
'01133' : 'Winston',
'02013' : 'Aleutians East',
'02016' : 'Aleutians West',
'02020' : 'Anchorage',
'02050' : 'Bethel',
'02060' : 'Bristol Bay',
'02068' : 'Denali',
'02070' : 'Dillingham',
'02090' : 'Fairbanks North Star',
'02100' : 'Haines',
'02110' : 'Juneau',
'02122' : 'Kenai Peninsula',
'02130' : 'Ketchikan Gateway',
'02150' : 'Kodiak Island',
'02164' : 'Lake and Peninsula',
'02170' : 'Matanuska-Susitna',
'02180' : 'Nome',
'02185' : 'North Slope',
'02188' : 'Northwest Arctic',
'02201' : 'Prince of Wales-Outer Ketchikan',
'02220' : 'Sitka',
'02232' : 'Skagway-Hoonah-Angoon',
'02240' : 'Southeast Fairbanks',
'02261' : 'Valdez-Cordova',
'02270' : 'Wade Hampton',
'02280' : 'Wrangell-Petersburg',
'02282' : 'Yakutat',
'02290' : 'Yukon-Koyukuk',
'04001' : 'Apache',
'04003' : 'Cochise',
'04005' : 'Coconino',
'04007' : 'Gila',
'04009' : 'Graham',
'04011' : 'Greenlee',
'04012' : 'La Paz',
'04013' : 'Maricopa',
'04015' : 'Mohave',
'04017' : 'Navajo',
'04019' : 'Pima',
'04021' : 'Pinal',
'04023' : 'Santa Cruz',
'04025' : 'Yavapai',
'04027' : 'Yuma',
'05001' : 'Arkansas',
'05003' : 'Ashley',
'05005' : 'Baxter',
'05007' : 'Benton',
'05009' : 'Boone',
'05011' : 'Bradley',
'05013' : 'Calhoun',
'05015' : 'Carroll',
'05017' : 'Chicot',
'05019' : 'Clark',
'05021' : 'Clay',
'05023' : 'Cleburne',
'05025' : 'Cleveland',
'05027' : 'Columbia',
'05029' : 'Conway',
'05031' : 'Craighead',
'05033' : 'Crawford',
'05035' : 'Crittenden',
'05037' : 'Cross',
'05039' : 'Dallas',
'05041' : 'Desha',
'05043' : 'Drew',
'05045' : 'Faulkner',
'05047' : 'Franklin',
'05049' : 'Fulton',
'05051' : 'Garland',
'05053' : 'Grant',
'05055' : 'Greene',
'05057' : 'Hempstead',
'05059' : 'Hot Spring',
'05061' : 'Howard',
'05063' : 'Independence',
'05065' : 'Izard',
'05067' : 'Jackson',
'05069' : 'Jefferson',
'05071' : 'Johnson',
'05073' : 'Lafayette',
'05075' : 'Lawrence',
'05077' : 'Lee',
'05079' : 'Lincoln',
'05081' : 'Little River',
'05083' : 'Logan',
'05085' : 'Lonoke',
'05087' : 'Madison',
'05089' : 'Marion',
'05091' : 'Miller',
'05093' : 'Mississippi',
'05095' : 'Monroe',
'05097' : 'Montgomery',
'05099' : 'Nevada',
'05101' : 'Newton',
'05103' : 'Ouachita',
'05105' : 'Perry',
'05107' : 'Phillips',
'05109' : 'Pike',
'05111' : 'Poinsett',
'05113' : 'Polk',
'05115' : 'Pope',
'05117' : 'Prairie',
'05119' : 'Pulaski',
'05121' : 'Randolph',
'05123' : 'Saint Francis',
'05125' : 'Saline',
'05127' : 'Scott',
'05129' : 'Searcy',
'05131' : 'Sebastian',
'05133' : 'Sevier',
'05135' : 'Sharp',
'05137' : 'Stone',
'05139' : 'Union',
'05141' : 'Van Buren',
'05143' : 'Washington',
'05145' : 'White',
'05147' : 'Woodruff',
'05149' : 'Yell',
'06001' : 'Alameda',
'06003' : 'Alpine',
'06005' : 'Amador',
'06007' : 'Butte',
'06009' : 'Calaveras',
'06011' : 'Colusa',
'06013' : 'Contra Costa',
'06015' : 'Del Norte',
'06017' : 'El Dorado',
'06019' : 'Fresno',
'06021' : 'Glenn',
'06023' : 'Humboldt',
'06025' : 'Imperial',
'06027' : 'Inyo',
'06029' : 'Kern',
'06031' : 'Kings',
'06033' : 'Lake',
'06035' : 'Lassen',
'06037' : 'Los Angeles',
'06039' : 'Madera',
'06041' : 'Marin',
'06043' : 'Mariposa',
'06045' : 'Mendocino',
'06047' : 'Merced',
'06049' : 'Modoc',
'06051' : 'Mono',
'06053' : 'Monterey',
'06055' : 'Napa',
'06057' : 'Nevada',
'06059' : 'Orange',
'06061' : 'Placer',
'06063' : 'Plumas',
'06065' : 'Riverside',
'06067' : 'Sacramento',
'06069' : 'San Benito',
'06071' : 'San Bernardino',
'06073' : 'San Diego',
'06075' : 'San Francisco',
'06077' : 'San Joaquin',
'06079' : 'San Luis Obispo',
'06081' : 'San Mateo',
'06083' : 'Santa Barbara',
'06085' : 'Santa Clara',
'06087' : 'Santa Cruz',
'06089' : 'Shasta',
'06091' : 'Sierra',
'06093' : 'Siskiyou',
'06095' : 'Solano',
'06097' : 'Sonoma',
'06099' : 'Stanislaus',
'06101' : 'Sutter',
'06103' : 'Tehama',
'06105' : 'Trinity',
'06107' : 'Tulare',
'06109' : 'Tuolumne',
'06111' : 'Ventura',
'06113' : 'Yolo',
'06115' : 'Yuba',
'08001' : 'Adams',
'08003' : 'Alamosa',
'08005' : 'Arapahoe',
'08007' : 'Archuleta',
'08009' : 'Baca',
'08011' : 'Bent',
'08013' : 'Boulder',
'08014' : 'Broomfield',
'08015' : 'Chaffee',
'08017' : 'Cheyenne',
'08019' : 'Clear Creek',
'08021' : 'Conejos',
'08023' : 'Costilla',
'08025' : 'Crowley',
'08027' : 'Custer',
'08029' : 'Delta',
'08031' : 'Denver',
'08033' : 'Dolores',
'08035' : 'Douglas',
'08037' : 'Eagle',
'08039' : 'Elbert',
'08041' : 'El Paso',
'08043' : 'Fremont',
'08045' : 'Garfield',
'08047' : 'Gilpin',
'08049' : 'Grand',
'08051' : 'Gunnison',
'08053' : 'Hinsdale',
'08055' : 'Huerfano',
'08057' : 'Jackson',
'08059' : 'Jefferson',
'08061' : 'Kiowa',
'08063' : 'Kit Carson',
'08065' : 'Lake',
'08067' : 'La Plata',
'08069' : 'Larimer',
'08071' : 'Las Animas',
'08073' : 'Lincoln',
'08075' : 'Logan',
'08077' : 'Mesa',
'08079' : 'Mineral',
'08081' : 'Moffat',
'08083' : 'Montezuma',
'08085' : 'Montrose',
'08087' : 'Morgan',
'08089' : 'Otero',
'08091' : 'Ouray',
'08093' : 'Park',
'08095' : 'Phillips',
'08097' : 'Pitkin',
'08099' : 'Prowers',
'08101' : 'Pueblo',
'08103' : 'Rio Blanco',
'08105' : 'Rio Grande',
'08107' : 'Routt',
'08109' : 'Saguache',
'08111' : 'San Juan',
'08113' : 'San Miguel',
'08115' : 'Sedgwick',
'08117' : 'Summit',
'08119' : 'Teller',
'08121' : 'Washington',
'08123' : 'Weld',
'08125' : 'Yuma',
'09001' : 'Fairfield',
'09003' : 'Hartford',
'09005' : 'Litchfield',
'09007' : 'Middlesex',
'09009' : 'New Haven',
'09011' : 'New London',
'09013' : 'Tolland',
'09015' : 'Windham',
'10001' : 'Kent',
'10003' : 'New Castle',
'10005' : 'Sussex',
'11001' : 'District of Columbia',
'12001' : 'Alachua',
'12003' : 'Baker',
'12005' : 'Bay',
'12007' : 'Bradford',
'12009' : 'Brevard',
'12011' : 'Broward',
'12013' : 'Calhoun',
'12015' : 'Charlotte',
'12017' : 'Citrus',
'12019' : 'Clay',
'12021' : 'Collier',
'12023' : 'Columbia',
'12027' : 'Desoto',
'12029' : 'Dixie',
'12031' : 'Duval',
'12033' : 'Escambia',
'12035' : 'Flagler',
'12037' : 'Franklin',
'12039' : 'Gadsden',
'12041' : 'Gilchrist',
'12043' : 'Glades',
'12045' : 'Gulf',
'12047' : 'Hamilton',
'12049' : 'Hardee',
'12051' : 'Hendry',
'12053' : 'Hernando',
'12055' : 'Highlands',
'12057' : 'Hillsborough',
'12059' : 'Holmes',
'12061' : 'Indian River',
'12063' : 'Jackson',
'12065' : 'Jefferson',
'12067' : 'Lafayette',
'12069' : 'Lake',
'12071' : 'Lee',
'12073' : 'Leon',
'12075' : 'Levy',
'12077' : 'Liberty',
'12079' : 'Madison',
'12081' : 'Manatee',
'12083' : 'Marion',
'12085' : 'Martin',
'12086' : 'Miami-Dade',
'12087' : 'Monroe',
'12089' : 'Nassau',
'12091' : 'Okaloosa',
'12093' : 'Okeechobee',
'12095' : 'Orange',
'12097' : 'Osceola',
'12099' : 'Palm Beach',
'12101' : 'Pasco',
'12103' : 'Pinellas',
'12105' : 'Polk',
'12107' : 'Putnam',
'12109' : 'Saint Johns',
'12111' : 'Saint Lucie',
'12113' : 'Santa Rosa',
'12115' : 'Sarasota',
'12117' : 'Seminole',
'12119' : 'Sumter',
'12121' : 'Suwannee',
'12123' : 'Taylor',
'12125' : 'Union',
'12127' : 'Volusia',
'12129' : 'Wakulla',
'12131' : 'Walton',
'12133' : 'Washington',
'13001' : 'Appling',
'13003' : 'Atkinson',
'13005' : 'Bacon',
'13007' : 'Baker',
'13009' : 'Baldwin',
'13011' : 'Banks',
'13013' : 'Barrow',
'13015' : 'Bartow',
'13017' : 'Ben Hill',
'13019' : 'Berrien',
'13021' : 'Bibb',
'13023' : 'Bleckley',
'13025' : 'Brantley',
'13027' : 'Brooks',
'13029' : 'Bryan',
'13031' : 'Bulloch',
'13033' : 'Burke',
'13035' : 'Butts',
'13037' : 'Calhoun',
'13039' : 'Camden',
'13043' : 'Candler',
'13045' : 'Carroll',
'13047' : 'Catoosa',
'13049' : 'Charlton',
'13051' : 'Chatham',
'13053' : 'Chattahoochee',
'13055' : 'Chattooga',
'13057' : 'Cherokee',
'13059' : 'Clarke',
'13061' : 'Clay',
'13063' : 'Clayton',
'13065' : 'Clinch',
'13067' : 'Cobb',
'13069' : 'Coffee',
'13071' : 'Colquitt',
'13073' : 'Columbia',
'13075' : 'Cook',
'13077' : 'Coweta',
'13079' : 'Crawford',
'13081' : 'Crisp',
'13083' : 'Dade',
'13085' : 'Dawson',
'13087' : 'Decatur',
'13089' : 'Dekalb',
'13091' : 'Dodge',
'13093' : 'Dooly',
'13095' : 'Dougherty',
'13097' : 'Douglas',
'13099' : 'Early',
'13101' : 'Echols',
'13103' : 'Effingham',
'13105' : 'Elbert',
'13107' : 'Emanuel',
'13109' : 'Evans',
'13111' : 'Fannin',
'13113' : 'Fayette',
'13115' : 'Floyd',
'13117' : 'Forsyth',
'13119' : 'Franklin',
'13121' : 'Fulton',
'13123' : 'Gilmer',
'13125' : 'Glascock',
'13127' : 'Glynn',
'13129' : 'Gordon',
'13131' : 'Grady',
'13133' : 'Greene',
'13135' : 'Gwinnett',
'13137' : 'Habersham',
'13139' : 'Hall',
'13141' : 'Hancock',
'13143' : 'Haralson',
'13145' : 'Harris',
'13147' : 'Hart',
'13149' : 'Heard',
'13151' : 'Henry',
'13153' : 'Houston',
'13155' : 'Irwin',
'13157' : 'Jackson',
'13159' : 'Jasper',
'13161' : 'Jeff Davis',
'13163' : 'Jefferson',
'13165' : 'Jenkins',
'13167' : 'Johnson',
'13169' : 'Jones',
'13171' : 'Lamar',
'13173' : 'Lanier',
'13175' : 'Laurens',
'13177' : 'Lee',
'13179' : 'Liberty',
'13181' : 'Lincoln',
'13183' : 'Long',
'13185' : 'Lowndes',
'13187' : 'Lumpkin',
'13189' : 'Mcduffie',
'13191' : 'Mcintosh',
'13193' : 'Macon',
'13195' : 'Madison',
'13197' : 'Marion',
'13199' : 'Meriwether',
'13201' : 'Miller',
'13205' : 'Mitchell',
'13207' : 'Monroe',
'13209' : 'Montgomery',
'13211' : 'Morgan',
'13213' : 'Murray',
'13215' : 'Muscogee',
'13217' : 'Newton',
'13219' : 'Oconee',
'13221' : 'Oglethorpe',
'13223' : 'Paulding',
'13225' : 'Peach',
'13227' : 'Pickens',
'13229' : 'Pierce',
'13231' : 'Pike',
'13233' : 'Polk',
'13235' : 'Pulaski',
'13237' : 'Putnam',
'13239' : 'Quitman',
'13241' : 'Rabun',
'13243' : 'Randolph',
'13245' : 'Richmond',
'13247' : 'Rockdale',
'13249' : 'Schley',
'13251' : 'Screven',
'13253' : 'Seminole',
'13255' : 'Spalding',
'13257' : 'Stephens',
'13259' : 'Stewart',
'13261' : 'Sumter',
'13263' : 'Talbot',
'13265' : 'Taliaferro',
'13267' : 'Tattnall',
'13269' : 'Taylor',
'13271' : 'Telfair',
'13273' : 'Terrell',
'13275' : 'Thomas',
'13277' : 'Tift',
'13279' : 'Toombs',
'13281' : 'Towns',
'13283' : 'Treutlen',
'13285' : 'Troup',
'13287' : 'Turner',
'13289' : 'Twiggs',
'13291' : 'Union',
'13293' : 'Upson',
'13295' : 'Walker',
'13297' : 'Walton',
'13299' : 'Ware',
'13301' : 'Warren',
'13303' : 'Washington',
'13305' : 'Wayne',
'13307' : 'Webster',
'13309' : 'Wheeler',
'13311' : 'White',
'13313' : 'Whitfield',
'13315' : 'Wilcox',
'13317' : 'Wilkes',
'13319' : 'Wilkinson',
'13321' : 'Worth',
'15001' : 'Hawaii',
'15003' : 'Honolulu',
'15005' : 'Kalawao',
'15007' : 'Kauai',
'15009' : 'Maui',
'16001' : 'Ada',
'16003' : 'Adams',
'16005' : 'Bannock',
'16007' : 'Bear Lake',
'16009' : 'Benewah',
'16011' : 'Bingham',
'16013' : 'Blaine',
'16015' : 'Boise',
'16017' : 'Bonner',
'16019' : 'Bonneville',
'16021' : 'Boundary',
'16023' : 'Butte',
'16025' : 'Camas',
'16027' : 'Canyon',
'16029' : 'Caribou',
'16031' : 'Cassia',
'16033' : 'Clark',
'16035' : 'Clearwater',
'16037' : 'Custer',
'16039' : 'Elmore',
'16041' : 'Franklin',
'16043' : 'Fremont',
'16045' : 'Gem',
'16047' : 'Gooding',
'16049' : 'Idaho',
'16051' : 'Jefferson',
'16053' : 'Jerome',
'16055' : 'Kootenai',
'16057' : 'Latah',
'16059' : 'Lemhi',
'16061' : 'Lewis',
'16063' : 'Lincoln',
'16065' : 'Madison',
'16067' : 'Minidoka',
'16069' : 'Nez Perce',
'16071' : 'Oneida',
'16073' : 'Owyhee',
'16075' : 'Payette',
'16077' : 'Power',
'16079' : 'Shoshone',
'16081' : 'Teton',
'16083' : 'Twin Falls',
'16085' : 'Valley',
'16087' : 'Washington',
'17001' : 'Adams',
'17003' : 'Alexander',
'17005' : 'Bond',
'17007' : 'Boone',
'17009' : 'Brown',
'17011' : 'Bureau',
'17013' : 'Calhoun',
'17015' : 'Carroll',
'17017' : 'Cass',
'17019' : 'Champaign',
'17021' : 'Christian',
'17023' : 'Clark',
'17025' : 'Clay',
'17027' : 'Clinton',
'17029' : 'Coles',
'17031' : 'Cook',
'17033' : 'Crawford',
'17035' : 'Cumberland',
'17037' : 'Dekalb',
'17039' : 'De Witt',
'17041' : 'Douglas',
'17043' : 'Dupage',
'17045' : 'Edgar',
'17047' : 'Edwards',
'17049' : 'Effingham',
'17051' : 'Fayette',
'17053' : 'Ford',
'17055' : 'Franklin',
'17057' : 'Fulton',
'17059' : 'Gallatin',
'17061' : 'Greene',
'17063' : 'Grundy',
'17065' : 'Hamilton',
'17067' : 'Hancock',
'17069' : 'Hardin',
'17071' : 'Henderson',
'17073' : 'Henry',
'17075' : 'Iroquois',
'17077' : 'Jackson',
'17079' : 'Jasper',
'17081' : 'Jefferson',
'17083' : 'Jersey',
'17085' : 'Jo Daviess',
'17087' : 'Johnson',
'17089' : 'Kane',
'17091' : 'Kankakee',
'17093' : 'Kendall',
'17095' : 'Knox',
'17097' : 'Lake',
'17099' : 'La Salle',
'17101' : 'Lawrence',
'17103' : 'Lee',
'17105' : 'Livingston',
'17107' : 'Logan',
'17109' : 'Mcdonough',
'17111' : 'Mchenry',
'17113' : 'Mclean',
'17115' : 'Macon',
'17117' : 'Macoupin',
'17119' : 'Madison',
'17121' : 'Marion',
'17123' : 'Marshall',
'17125' : 'Mason',
'17127' : 'Massac',
'17129' : 'Menard',
'17131' : 'Mercer',
'17133' : 'Monroe',
'17135' : 'Montgomery',
'17137' : 'Morgan',
'17139' : 'Moultrie',
'17141' : 'Ogle',
'17143' : 'Peoria',
'17145' : 'Perry',
'17147' : 'Piatt',
'17149' : 'Pike',
'17151' : 'Pope',
'17153' : 'Pulaski',
'17155' : 'Putnam',
'17157' : 'Randolph',
'17159' : 'Richland',
'17161' : 'Rock Island',
'17163' : 'Saint Clair',
'17165' : 'Saline',
'17167' : 'Sangamon',
'17169' : 'Schuyler',
'17171' : 'Scott',
'17173' : 'Shelby',
'17175' : 'Stark',
'17177' : 'Stephenson',
'17179' : 'Tazewell',
'17181' : 'Union',
'17183' : 'Vermilion',
'17185' : 'Wabash',
'17187' : 'Warren',
'17189' : 'Washington',
'17191' : 'Wayne',
'17193' : 'White',
'17195' : 'Whiteside',
'17197' : 'Will',
'17199' : 'Williamson',
'17201' : 'Winnebago',
'17203' : 'Woodford',
'18001' : 'Adams',
'18003' : 'Allen',
'18005' : 'Bartholomew',
'18007' : 'Benton',
'18009' : 'Blackford',
'18011' : 'Boone',
'18013' : 'Brown',
'18015' : 'Carroll',
'18017' : 'Cass',
'18019' : 'Clark',
'18021' : 'Clay',
'18023' : 'Clinton',
'18025' : 'Crawford',
'18027' : 'Daviess',
'18029' : 'Dearborn',
'18031' : 'Decatur',
'18033' : 'De Kalb',
'18035' : 'Delaware',
'18037' : 'Dubois',
'18039' : 'Elkhart',
'18041' : 'Fayette',
'18043' : 'Floyd',
'18045' : 'Fountain',
'18047' : 'Franklin',
'18049' : 'Fulton',
'18051' : 'Gibson',
'18053' : 'Grant',
'18055' : 'Greene',
'18057' : 'Hamilton',
'18059' : 'Hancock',
'18061' : 'Harrison',
'18063' : 'Hendricks',
'18065' : 'Henry',
'18067' : 'Howard',
'18069' : 'Huntington',
'18071' : 'Jackson',
'18073' : 'Jasper',
'18075' : 'Jay',
'18077' : 'Jefferson',
'18079' : 'Jennings',
'18081' : 'Johnson',
'18083' : 'Knox',
'18085' : 'Kosciusko',
'18087' : 'Lagrange',
'18089' : 'Lake',
'18091' : 'La Porte',
'18093' : 'Lawrence',
'18095' : 'Madison',
'18097' : 'Marion',
'18099' : 'Marshall',
'18101' : 'Martin',
'18103' : 'Miami',
'18105' : 'Monroe',
'18107' : 'Montgomery',
'18109' : 'Morgan',
'18111' : 'Newton',
'18113' : 'Noble',
'18115' : 'Ohio',
'18117' : 'Orange',
'18119' : 'Owen',
'18121' : 'Parke',
'18123' : 'Perry',
'18125' : 'Pike',
'18127' : 'Porter',
'18129' : 'Posey',
'18131' : 'Pulaski',
'18133' : 'Putnam',
'18135' : 'Randolph',
'18137' : 'Ripley',
'18139' : 'Rush',
'18141' : 'Saint Joseph',
'18143' : 'Scott',
'18145' : 'Shelby',
'18147' : 'Spencer',
'18149' : 'Starke',
'18151' : 'Steuben',
'18153' : 'Sullivan',
'18155' : 'Switzerland',
'18157' : 'Tippecanoe',
'18159' : 'Tipton',
'18161' : 'Union',
'18163' : 'Vanderburgh',
'18165' : 'Vermillion',
'18167' : 'Vigo',
'18169' : 'Wabash',
'18171' : 'Warren',
'18173' : 'Warrick',
'18175' : 'Washington',
'18177' : 'Wayne',
'18179' : 'Wells',
'18181' : 'White',
'18183' : 'Whitley',
'19001' : 'Adair',
'19003' : 'Adams',
'19005' : 'Allamakee',
'19007' : 'Appanoose',
'19009' : 'Audubon',
'19011' : 'Benton',
'19013' : 'Black Hawk',
'19015' : 'Boone',
'19017' : 'Bremer',
'19019' : 'Buchanan',
'19021' : 'Buena Vista',
'19023' : 'Butler',
'19025' : 'Calhoun',
'19027' : 'Carroll',
'19029' : 'Cass',
'19031' : 'Cedar',
'19033' : 'Cerro Gordo',
'19035' : 'Cherokee',
'19037' : 'Chickasaw',
'19039' : 'Clarke',
'19041' : 'Clay',
'19043' : 'Clayton',
'19045' : 'Clinton',
'19047' : 'Crawford',
'19049' : 'Dallas',
'19051' : 'Davis',
'19053' : 'Decatur',
'19055' : 'Delaware',
'19057' : 'Des Moines',
'19059' : 'Dickinson',
'19061' : 'Dubuque',
'19063' : 'Emmet',
'19065' : 'Fayette',
'19067' : 'Floyd',
'19069' : 'Franklin',
'19071' : 'Fremont',
'19073' : 'Greene',
'19075' : 'Grundy',
'19077' : 'Guthrie',
'19079' : 'Hamilton',
'19081' : 'Hancock',
'19083' : 'Hardin',
'19085' : 'Harrison',
'19087' : 'Henry',
'19089' : 'Howard',
'19091' : 'Humboldt',
'19093' : 'Ida',
'19095' : 'Iowa',
'19097' : 'Jackson',
'19099' : 'Jasper',
'19101' : 'Jefferson',
'19103' : 'Johnson',
'19105' : 'Jones',
'19107' : 'Keokuk',
'19109' : 'Kossuth',
'19111' : 'Lee',
'19113' : 'Linn',
'19115' : 'Louisa',
'19117' : 'Lucas',
'19119' : 'Lyon',
'19121' : 'Madison',
'19123' : 'Mahaska',
'19125' : 'Marion',
'19127' : 'Marshall',
'19129' : 'Mills',
'19131' : 'Mitchell',
'19133' : 'Monona',
'19135' : 'Monroe',
'19137' : 'Montgomery',
'19139' : 'Muscatine',
'19141' : 'O\'Brien',
'19143' : 'Osceola',
'19145' : 'Page',
'19147' : 'Palo Alto',
'19149' : 'Plymouth',
'19151' : 'Pocahontas',
'19153' : 'Polk',
'19155' : 'Pottawattamie',
'19157' : 'Poweshiek',
'19159' : 'Ringgold',
'19161' : 'Sac',
'19163' : 'Scott',
'19165' : 'Shelby',
'19167' : 'Sioux',
'19169' : 'Story',
'19171' : 'Tama',
'19173' : 'Taylor',
'19175' : 'Union',
'19177' : 'Van Buren',
'19179' : 'Wapello',
'19181' : 'Warren',
'19183' : 'Washington',
'19185' : 'Wayne',
'19187' : 'Webster',
'19189' : 'Winnebago',
'19191' : 'Winneshiek',
'19193' : 'Woodbury',
'19195' : 'Worth',
'19197' : 'Wright',
'20001' : 'Allen',
'20003' : 'Anderson',
'20005' : 'Atchison',
'20007' : 'Barber',
'20009' : 'Barton',
'20011' : 'Bourbon',
'20013' : 'Brown',
'20015' : 'Butler',
'20017' : 'Chase',
'20019' : 'Chautauqua',
'20021' : 'Cherokee',
'20023' : 'Cheyenne',
'20025' : 'Clark',
'20027' : 'Clay',
'20029' : 'Cloud',
'20031' : 'Coffey',
'20033' : 'Comanche',
'20035' : 'Cowley',
'20037' : 'Crawford',
'20039' : 'Decatur',
'20041' : 'Dickinson',
'20043' : 'Doniphan',
'20045' : 'Douglas',
'20047' : 'Edwards',
'20049' : 'Elk',
'20051' : 'Ellis',
'20053' : 'Ellsworth',
'20055' : 'Finney',
'20057' : 'Ford',
'20059' : 'Franklin',
'20061' : 'Geary',
'20063' : 'Gove',
'20065' : 'Graham',
'20067' : 'Grant',
'20069' : 'Gray',
'20071' : 'Greeley',
'20073' : 'Greenwood',
'20075' : 'Hamilton',
'20077' : 'Harper',
'20079' : 'Harvey',
'20081' : 'Haskell',
'20083' : 'Hodgeman',
'20085' : 'Jackson',
'20087' : 'Jefferson',
'20089' : 'Jewell',
'20091' : 'Johnson',
'20093' : 'Kearny',
'20095' : 'Kingman',
'20097' : 'Kiowa',
'20099' : 'Labette',
'20101' : 'Lane',
'20103' : 'Leavenworth',
'20105' : 'Lincoln',
'20107' : 'Linn',
'20109' : 'Logan',
'20111' : 'Lyon',
'20113' : 'Mcpherson',
'20115' : 'Marion',
'20117' : 'Marshall',
'20119' : 'Meade',
'20121' : 'Miami',
'20123' : 'Mitchell',
'20125' : 'Montgomery',
'20127' : 'Morris',
'20129' : 'Morton',
'20131' : 'Nemaha',
'20133' : 'Neosho',
'20135' : 'Ness',
'20137' : 'Norton',
'20139' : 'Osage',
'20141' : 'Osborne',
'20143' : 'Ottawa',
'20145' : 'Pawnee',
'20147' : 'Phillips',
'20149' : 'Pottawatomie',
'20151' : 'Pratt',
'20153' : 'Rawlins',
'20155' : 'Reno',
'20157' : 'Republic',
'20159' : 'Rice',
'20161' : 'Riley',
'20163' : 'Rooks',
'20165' : 'Rush',
'20167' : 'Russell',
'20169' : 'Saline',
'20171' : 'Scott',
'20173' : 'Sedgwick',
'20175' : 'Seward',
'20177' : 'Shawnee',
'20179' : 'Sheridan',
'20181' : 'Sherman',
'20183' : 'Smith',
'20185' : 'Stafford',
'20187' : 'Stanton',
'20189' : 'Stevens',
'20191' : 'Sumner',
'20193' : 'Thomas',
'20195' : 'Trego',
'20197' : 'Wabaunsee',
'20199' : 'Wallace',
'20201' : 'Washington',
'20203' : 'Wichita',
'20205' : 'Wilson',
'20207' : 'Woodson',
'20209' : 'Wyandotte',
'21001' : 'Adair',
'21003' : 'Allen',
'21005' : 'Anderson',
'21007' : 'Ballard',
'21009' : 'Barren',
'21011' : 'Bath',
'21013' : 'Bell',
'21015' : 'Boone',
'21017' : 'Bourbon',
'21019' : 'Boyd',
'21021' : 'Boyle',
'21023' : 'Bracken',
'21025' : 'Breathitt',
'21027' : 'Breckinridge',
'21029' : 'Bullitt',
'21031' : 'Butler',
'21033' : 'Caldwell',
'21035' : 'Calloway',
'21037' : 'Campbell',
'21039' : 'Carlisle',
'21041' : 'Carroll',
'21043' : 'Carter',
'21045' : 'Casey',
'21047' : 'Christian',
'21049' : 'Clark',
'21051' : 'Clay',
'21053' : 'Clinton',
'21055' : 'Crittenden',
'21057' : 'Cumberland',
'21059' : 'Daviess',
'21061' : 'Edmonson',
'21063' : 'Elliott',
'21065' : 'Estill',
'21067' : 'Fayette',
'21069' : 'Fleming',
'21071' : 'Floyd',
'21073' : 'Franklin',
'21075' : 'Fulton',
'21077' : 'Gallatin',
'21079' : 'Garrard',
'21081' : 'Grant',
'21083' : 'Graves',
'21085' : 'Grayson',
'21087' : 'Green',
'21089' : 'Greenup',
'21091' : 'Hancock',
'21093' : 'Hardin',
'21095' : 'Harlan',
'21097' : 'Harrison',
'21099' : 'Hart',
'21101' : 'Henderson',
'21103' : 'Henry',
'21105' : 'Hickman',
'21107' : 'Hopkins',
'21109' : 'Jackson',
'21111' : 'Jefferson',
'21113' : 'Jessamine',
'21115' : 'Johnson',
'21117' : 'Kenton',
'21119' : 'Knott',
'21121' : 'Knox',
'21123' : 'Larue',
'21125' : 'Laurel',
'21127' : 'Lawrence',
'21129' : 'Lee',
'21131' : 'Leslie',
'21133' : 'Letcher',
'21135' : 'Lewis',
'21137' : 'Lincoln',
'21139' : 'Livingston',
'21141' : 'Logan',
'21143' : 'Lyon',
'21145' : 'Mccracken',
'21147' : 'Mccreary',
'21149' : 'Mclean',
'21151' : 'Madison',
'21153' : 'Magoffin',
'21155' : 'Marion',
'21157' : 'Marshall',
'21159' : 'Martin',
'21161' : 'Mason',
'21163' : 'Meade',
'21165' : 'Menifee',
'21167' : 'Mercer',
'21169' : 'Metcalfe',
'21171' : 'Monroe',
'21173' : 'Montgomery',
'21175' : 'Morgan',
'21177' : 'Muhlenberg',
'21179' : 'Nelson',
'21181' : 'Nicholas',
'21183' : 'Ohio',
'21185' : 'Oldham',
'21187' : 'Owen',
'21189' : 'Owsley',
'21191' : 'Pendleton',
'21193' : 'Perry',
'21195' : 'Pike',
'21197' : 'Powell',
'21199' : 'Pulaski',
'21201' : 'Robertson',
'21203' : 'Rockcastle',
'21205' : 'Rowan',
'21207' : 'Russell',
'21209' : 'Scott',
'21211' : 'Shelby',
'21213' : 'Simpson',
'21215' : 'Spencer',
'21217' : 'Taylor',
'21219' : 'Todd',
'21221' : 'Trigg',
'21223' : 'Trimble',
'21225' : 'Union',
'21227' : 'Warren',
'21229' : 'Washington',
'21231' : 'Wayne',
'21233' : 'Webster',
'21235' : 'Whitley',
'21237' : 'Wolfe',
'21239' : 'Woodford',
'22001' : 'Acadia',
'22003' : 'Allen',
'22005' : 'Ascension',
'22007' : 'Assumption',
'22009' : 'Avoyelles',
'22011' : 'Beauregard',
'22013' : 'Bienville',
'22015' : 'Bossier',
'22017' : 'Caddo',
'22019' : 'Calcasieu',
'22021' : 'Caldwell',
'22023' : 'Cameron',
'22025' : 'Catahoula',
'22027' : 'Claiborne',
'22029' : 'Concordia',
'22031' : 'De Soto',
'22033' : 'East Baton Rouge',
'22035' : 'East Carroll',
'22037' : 'East Feliciana',
'22039' : 'Evangeline',
'22041' : 'Franklin',
'22043' : 'Grant',
'22045' : 'Iberia',
'22047' : 'Iberville',
'22049' : 'Jackson',
'22051' : 'Jefferson',
'22053' : 'Jefferson Davis',
'22055' : 'Lafayette',
'22057' : 'Lafourche',
'22059' : 'La Salle',
'22061' : 'Lincoln',
'22063' : 'Livingston',
'22065' : 'Madison',
'22067' : 'Morehouse',
'22069' : 'Natchitoches',
'22071' : 'Orleans',
'22073' : 'Ouachita',
'22075' : 'Plaquemines',
'22077' : 'Pointe Coupee',
'22079' : 'Rapides',
'22081' : 'Red River',
'22083' : 'Richland',
'22085' : 'Sabine',
'22087' : 'Saint Bernard',
'22089' : 'Saint Charles',
'22091' : 'Saint Helena',
'22093' : 'Saint James',
'22095' : 'Saint John the Baptist',
'22097' : 'Saint Landry',
'22099' : 'Saint Martin',
'22101' : 'Saint Mary',
'22103' : 'Saint Tammany',
'22105' : 'Tangipahoa',
'22107' : 'Tensas',
'22109' : 'Terrebonne',
'22111' : 'Union',
'22113' : 'Vermilion',
'22115' : 'Vernon',
'22117' : 'Washington',
'22119' : 'Webster',
'22121' : 'West Baton Rouge',
'22123' : 'West Carroll',
'22125' : 'West Feliciana',
'22127' : 'Winn',
'23001' : 'Androscoggin',
'23003' : 'Aroostook',
'23005' : 'Cumberland',
'23007' : 'Franklin',
'23009' : 'Hancock',
'23011' : 'Kennebec',
'23013' : 'Knox',
'23015' : 'Lincoln',
'23017' : 'Oxford',
'23019' : 'Penobscot',
'23021' : 'Piscataquis',
'23023' : 'Sagadahoc',
'23025' : 'Somerset',
'23027' : 'Waldo',
'23029' : 'Washington',
'23031' : 'York',
'24001' : 'Allegany',
'24003' : 'Anne Arundel',
'24005' : 'Baltimore',
'24009' : 'Calvert',
'24011' : 'Caroline',
'24013' : 'Carroll',
'24015' : 'Cecil',
'24017' : 'Charles',
'24019' : 'Dorchester',
'24021' : 'Frederick',
'24023' : 'Garrett',
'24025' : 'Harford',
'24027' : 'Howard',
'24029' : 'Kent',
'24031' : 'Montgomery',
'24033' : 'Prince George\'s',
'24035' : 'Queen Anne\'s',
'24037' : 'Saint Mary\'s',
'24039' : 'Somerset',
'24041' : 'Talbot',
'24043' : 'Washington',
'24045' : 'Wicomico',
'24047' : 'Worcester',
'24510' : 'City of Baltimore',
'25001' : 'Barnstable',
'25003' : 'Berkshire',
'25005' : 'Bristol',
'25007' : 'Dukes',
'25009' : 'Essex',
'25011' : 'Franklin',
'25013' : 'Hampden',
'25015' : 'Hampshire',
'25017' : 'Middlesex',
'25019' : 'Nantucket',
'25021' : 'Norfolk',
'25023' : 'Plymouth',
'25025' : 'Suffolk',
'25027' : 'Worcester',
'26001' : 'Alcona',
'26003' : 'Alger',
'26005' : 'Allegan',
'26007' : 'Alpena',
'26009' : 'Antrim',
'26011' : 'Arenac',
'26013' : 'Baraga',
'26015' : 'Barry',
'26017' : 'Bay',
'26019' : 'Benzie',
'26021' : 'Berrien',
'26023' : 'Branch',
'26025' : 'Calhoun',
'26027' : 'Cass',
'26029' : 'Charlevoix',
'26031' : 'Cheboygan',
'26033' : 'Chippewa',
'26035' : 'Clare',
'26037' : 'Clinton',
'26039' : 'Crawford',
'26041' : 'Delta',
'26043' : 'Dickinson',
'26045' : 'Eaton',
'26047' : 'Emmet',
'26049' : 'Genesee',
'26051' : 'Gladwin',
'26053' : 'Gogebic',
'26055' : 'Grand Traverse',
'26057' : 'Gratiot',
'26059' : 'Hillsdale',
'26061' : 'Houghton',
'26063' : 'Huron',
'26065' : 'Ingham',
'26067' : 'Ionia',
'26069' : 'Iosco',
'26071' : 'Iron',
'26073' : 'Isabella',
'26075' : 'Jackson',
'26077' : 'Kalamazoo',
'26079' : 'Kalkaska',
'26081' : 'Kent',
'26083' : 'Keweenaw',
'26085' : 'Lake',
'26087' : 'Lapeer',
'26089' : 'Leelanau',
'26091' : 'Lenawee',
'26093' : 'Livingston',
'26095' : 'Luce',
'26097' : 'Mackinac',
'26099' : 'Macomb',
'26101' : 'Manistee',
'26103' : 'Marquette',
'26105' : 'Mason',
'26107' : 'Mecosta',
'26109' : 'Menominee',
'26111' : 'Midland',
'26113' : 'Missaukee',
'26115' : 'Monroe',
'26117' : 'Montcalm',
'26119' : 'Montmorency',
'26121' : 'Muskegon',
'26123' : 'Newaygo',
'26125' : 'Oakland',
'26127' : 'Oceana',
'26129' : 'Ogemaw',
'26131' : 'Ontonagon',
'26133' : 'Osceola',
'26135' : 'Oscoda',
'26137' : 'Otsego',
'26139' : 'Ottawa',
'26141' : 'Presque Isle',
'26143' : 'Roscommon',
'26145' : 'Saginaw',
'26147' : 'Saint Clair',
'26149' : 'Saint Joseph',
'26151' : 'Sanilac',
'26153' : 'Schoolcraft',
'26155' : 'Shiawassee',
'26157' : 'Tuscola',
'26159' : 'Van Buren',
'26161' : 'Washtenaw',
'26163' : 'Wayne',
'26165' : 'Wexford',
'27001' : 'Aitkin',
'27003' : 'Anoka',
'27005' : 'Becker',
'27007' : 'Beltrami',
'27009' : 'Benton',
'27011' : 'Big Stone',
'27013' : 'Blue Earth',
'27015' : 'Brown',
'27017' : 'Carlton',
'27019' : 'Carver',
'27021' : 'Cass',
'27023' : 'Chippewa',
'27025' : 'Chisago',
'27027' : 'Clay',
'27029' : 'Clearwater',
'27031' : 'Cook',
'27033' : 'Cottonwood',
'27035' : 'Crow Wing',
'27037' : 'Dakota',
'27039' : 'Dodge',
'27041' : 'Douglas',
'27043' : 'Faribault',
'27045' : 'Fillmore',
'27047' : 'Freeborn',
'27049' : 'Goodhue',
'27051' : 'Grant',
'27053' : 'Hennepin',
'27055' : 'Houston',
'27057' : 'Hubbard',
'27059' : 'Isanti',
'27061' : 'Itasca',
'27063' : 'Jackson',
'27065' : 'Kanabec',
'27067' : 'Kandiyohi',
'27069' : 'Kittson',
'27071' : 'Koochiching',
'27073' : 'Lac Qui Parle',
'27075' : 'Lake',
'27077' : 'Lake of the Woods',
'27079' : 'Le Sueur',
'27081' : 'Lincoln',
'27083' : 'Lyon',
'27085' : 'Mcleod',
'27087' : 'Mahnomen',
'27089' : 'Marshall',
'27091' : 'Martin',
'27093' : 'Meeker',
'27095' : 'Mille Lacs',
'27097' : 'Morrison',
'27099' : 'Mower',
'27101' : 'Murray',
'27103' : 'Nicollet',
'27105' : 'Nobles',
'27107' : 'Norman',
'27109' : 'Olmsted',
'27111' : 'Otter Tail',
'27113' : 'Pennington',
'27115' : 'Pine',
'27117' : 'Pipestone',
'27119' : 'Polk',
'27121' : 'Pope',
'27123' : 'Ramsey',
'27125' : 'Red Lake',
'27127' : 'Redwood',
'27129' : 'Renville',
'27131' : 'Rice',
'27133' : 'Rock',
'27135' : 'Roseau',
'27137' : 'Saint Louis',
'27139' : 'Scott',
'27141' : 'Sherburne',
'27143' : 'Sibley',
'27145' : 'Stearns',
'27147' : 'Steele',
'27149' : 'Stevens',
'27151' : 'Swift',
'27153' : 'Todd',
'27155' : 'Traverse',
'27157' : 'Wabasha',
'27159' : 'Wadena',
'27161' : 'Waseca',
'27163' : 'Washington',
'27165' : 'Watonwan',
'27167' : 'Wilkin',
'27169' : 'Winona',
'27171' : 'Wright',
'27173' : 'Yellow Medicine',
'28001' : 'Adams',
'28003' : 'Alcorn',
'28005' : 'Amite',
'28007' : 'Attala',
'28009' : 'Benton',
'28011' : 'Bolivar',
'28013' : 'Calhoun',
'28015' : 'Carroll',
'28017' : 'Chickasaw',
'28019' : 'Choctaw',
'28021' : 'Claiborne',
'28023' : 'Clarke',
'28025' : 'Clay',
'28027' : 'Coahoma',
'28029' : 'Copiah',
'28031' : 'Covington',
'28033' : 'Desoto',
'28035' : 'Forrest',
'28037' : 'Franklin',
'28039' : 'George',
'28041' : 'Greene',
'28043' : 'Grenada',
'28045' : 'Hancock',
'28047' : 'Harrison',
'28049' : 'Hinds',
'28051' : 'Holmes',
'28053' : 'Humphreys',
'28055' : 'Issaquena',
'28057' : 'Itawamba',
'28059' : 'Jackson',
'28061' : 'Jasper',
'28063' : 'Jefferson',
'28065' : 'Jefferson Davis',
'28067' : 'Jones',
'28069' : 'Kemper',
'28071' : 'Lafayette',
'28073' : 'Lamar',
'28075' : 'Lauderdale',
'28077' : 'Lawrence',
'28079' : 'Leake',
'28081' : 'Lee',
'28083' : 'Leflore',
'28085' : 'Lincoln',
'28087' : 'Lowndes',
'28089' : 'Madison',
'28091' : 'Marion',
'28093' : 'Marshall',
'28095' : 'Monroe',
'28097' : 'Montgomery',
'28099' : 'Neshoba',
'28101' : 'Newton',
'28103' : 'Noxubee',
'28105' : 'Oktibbeha',
'28107' : 'Panola',
'28109' : 'Pearl River',
'28111' : 'Perry',
'28113' : 'Pike',
'28115' : 'Pontotoc',
'28117' : 'Prentiss',
'28119' : 'Quitman',
'28121' : 'Rankin',
'28123' : 'Scott',
'28125' : 'Sharkey',
'28127' : 'Simpson',
'28129' : 'Smith',
'28131' : 'Stone',
'28133' : 'Sunflower',
'28135' : 'Tallahatchie',
'28137' : 'Tate',
'28139' : 'Tippah',
'28141' : 'Tishomingo',
'28143' : 'Tunica',
'28145' : 'Union',
'28147' : 'Walthall',
'28149' : 'Warren',
'28151' : 'Washington',
'28153' : 'Wayne',
'28155' : 'Webster',
'28157' : 'Wilkinson',
'28159' : 'Winston',
'28161' : 'Yalobusha',
'28163' : 'Yazoo',
'29001' : 'Adair',
'29003' : 'Andrew',
'29005' : 'Atchison',
'29007' : 'Audrain',
'29009' : 'Barry',
'29011' : 'Barton',
'29013' : 'Bates',
'29015' : 'Benton',
'29017' : 'Bollinger',
'29019' : 'Boone',
'29021' : 'Buchanan',
'29023' : 'Butler',
'29025' : 'Caldwell',
'29027' : 'Callaway',
'29029' : 'Camden',
'29031' : 'Cape Girardeau',
'29033' : 'Carroll',
'29035' : 'Carter',
'29037' : 'Cass',
'29039' : 'Cedar',
'29041' : 'Chariton',
'29043' : 'Christian',
'29045' : 'Clark',
'29047' : 'Clay',
'29049' : 'Clinton',
'29051' : 'Cole',
'29053' : 'Cooper',
'29055' : 'Crawford',
'29057' : 'Dade',
'29059' : 'Dallas',
'29061' : 'Daviess',
'29063' : 'Dekalb',
'29065' : 'Dent',
'29067' : 'Douglas',
'29069' : 'Dunklin',
'29071' : 'Franklin',
'29073' : 'Gasconade',
'29075' : 'Gentry',
'29077' : 'Greene',
'29079' : 'Grundy',
'29081' : 'Harrison',
'29083' : 'Henry',
'29085' : 'Hickory',
'29087' : 'Holt',
'29089' : 'Howard',
'29091' : 'Howell',
'29093' : 'Iron',
'29095' : 'Jackson',
'29097' : 'Jasper',
'29099' : 'Jefferson',
'29101' : 'Johnson',
'29103' : 'Knox',
'29105' : 'Laclede',
'29107' : 'Lafayette',
'29109' : 'Lawrence',
'29111' : 'Lewis',
'29113' : 'Lincoln',
'29115' : 'Linn',
'29117' : 'Livingston',
'29119' : 'McDonald',
'29121' : 'Macon',
'29123' : 'Madison',
'29125' : 'Maries',
'29127' : 'Marion',
'29129' : 'Mercer',
'29131' : 'Miller',
'29133' : 'Mississippi',
'29135' : 'Moniteau',
'29137' : 'Monroe',
'29139' : 'Montgomery',
'29141' : 'Morgan',
'29143' : 'New Madrid',
'29145' : 'Newton',
'29147' : 'Nodaway',
'29149' : 'Oregon',
'29151' : 'Osage',
'29153' : 'Ozark',
'29155' : 'Pemiscot',
'29157' : 'Perry',
'29159' : 'Pettis',
'29161' : 'Phelps',
'29163' : 'Pike',
'29165' : 'Platte',
'29167' : 'Polk',
'29169' : 'Pulaski',
'29171' : 'Putnam',
'29173' : 'Ralls',
'29175' : 'Randolph',
'29177' : 'Ray',
'29179' : 'Reynolds',
'29181' : 'Ripley',
'29183' : 'Saint Charles',
'29185' : 'Saint Clair',
'29186' : 'Sainte Genevieve',
'29187' : 'Saint Francois',
'29189' : 'Saint Louis',
'29195' : 'Saline',
'29197' : 'Schuyler',
'29199' : 'Scotland',
'29201' : 'Scott',
'29203' : 'Shannon',
'29205' : 'Shelby',
'29207' : 'Stoddard',
'29209' : 'Stone',
'29211' : 'Sullivan',
'29213' : 'Taney',
'29215' : 'Texas',
'29217' : 'Vernon',
'29219' : 'Warren',
'29221' : 'Washington',
'29223' : 'Wayne',
'29225' : 'Webster',
'29227' : 'Worth',
'29229' : 'Wright',
'29510' : 'City of Saint Louis',
'30001' : 'Beaverhead',
'30003' : 'Big Horn',
'30005' : 'Blaine',
'30007' : 'Broadwater',
'30009' : 'Carbon',
'30011' : 'Carter',
'30013' : 'Cascade',
'30015' : 'Chouteau',
'30017' : 'Custer',
'30019' : 'Daniels',
'30021' : 'Dawson',
'30023' : 'Deer Lodge',
'30025' : 'Fallon',
'30027' : 'Fergus',
'30029' : 'Flathead',
'30031' : 'Gallatin',
'30033' : 'Garfield',
'30035' : 'Glacier',
'30037' : 'Golden Valley',
'30039' : 'Granite',
'30041' : 'Hill',
'30043' : 'Jefferson',
'30045' : 'Judith Basin',
'30047' : 'Lake',
'30049' : 'Lewis and Clark',
'30051' : 'Liberty',
'30053' : 'Lincoln',
'30055' : 'Mccone',
'30057' : 'Madison',
'30059' : 'Meagher',
'30061' : 'Mineral',
'30063' : 'Missoula',
'30065' : 'Musselshell',
'30067' : 'Park',
'30069' : 'Petroleum',
'30071' : 'Phillips',
'30073' : 'Pondera',
'30075' : 'Powder River',
'30077' : 'Powell',
'30079' : 'Prairie',
'30081' : 'Ravalli',
'30083' : 'Richland',
'30085' : 'Roosevelt',
'30087' : 'Rosebud',
'30089' : 'Sanders',
'30091' : 'Sheridan',
'30093' : 'Silver Bow',
'30095' : 'Stillwater',
'30097' : 'Sweet Grass',
'30099' : 'Teton',
'30101' : 'Toole',
'30103' : 'Treasure',
'30105' : 'Valley',
'30107' : 'Wheatland',
'30109' : 'Wibaux',
'30111' : 'Yellowstone',
'31001' : 'Adams',
'31003' : 'Antelope',
'31005' : 'Arthur',
'31007' : 'Banner',
'31009' : 'Blaine',
'31011' : 'Boone',
'31013' : 'Box Butte',
'31015' : 'Boyd',
'31017' : 'Brown',
'31019' : 'Buffalo',
'31021' : 'Burt',
'31023' : 'Butler',
'31025' : 'Cass',
'31027' : 'Cedar',
'31029' : 'Chase',
'31031' : 'Cherry',
'31033' : 'Cheyenne',
'31035' : 'Clay',
'31037' : 'Colfax',
'31039' : 'Cuming',
'31041' : 'Custer',
'31043' : 'Dakota',
'31045' : 'Dawes',
'31047' : 'Dawson',
'31049' : 'Deuel',
'31051' : 'Dixon',
'31053' : 'Dodge',
'31055' : 'Douglas',
'31057' : 'Dundy',
'31059' : 'Fillmore',
'31061' : 'Franklin',
'31063' : 'Frontier',
'31065' : 'Furnas',
'31067' : 'Gage',
'31069' : 'Garden',
'31071' : 'Garfield',
'31073' : 'Gosper',
'31075' : 'Grant',
'31077' : 'Greeley',
'31079' : 'Hall',
'31081' : 'Hamilton',
'31083' : 'Harlan',
'31085' : 'Hayes',
'31087' : 'Hitchcock',
'31089' : 'Holt',
'31091' : 'Hooker',
'31093' : 'Howard',
'31095' : 'Jefferson',
'31097' : 'Johnson',
'31099' : 'Kearney',
'31101' : 'Keith',
'31103' : 'Keya Paha',
'31105' : 'Kimball',
'31107' : 'Knox',
'31109' : 'Lancaster',
'31111' : 'Lincoln',
'31113' : 'Logan',
'31115' : 'Loup',
'31117' : 'Mcpherson',
'31119' : 'Madison',
'31121' : 'Merrick',
'31123' : 'Morrill',
'31125' : 'Nance',
'31127' : 'Nemaha',
'31129' : 'Nuckolls',
'31131' : 'Otoe',
'31133' : 'Pawnee',
'31135' : 'Perkins',
'31137' : 'Phelps',
'31139' : 'Pierce',
'31141' : 'Platte',
'31143' : 'Polk',
'31145' : 'Red Willow',
'31147' : 'Richardson',
'31149' : 'Rock',
'31151' : 'Saline',
'31153' : 'Sarpy',
'31155' : 'Saunders',
'31157' : 'Scotts Bluff',
'31159' : 'Seward',
'31161' : 'Sheridan',
'31163' : 'Sherman',
'31165' : 'Sioux',
'31167' : 'Stanton',
'31169' : 'Thayer',
'31171' : 'Thomas',
'31173' : 'Thurston',
'31175' : 'Valley',
'31177' : 'Washington',
'31179' : 'Wayne',
'31181' : 'Webster',
'31183' : 'Wheeler',
'31185' : 'York',
'32001' : 'Churchill',
'32003' : 'Clark',
'32005' : 'Douglas',
'32007' : 'Elko',
'32009' : 'Esmeralda',
'32011' : 'Eureka',
'32013' : 'Humboldt',
'32015' : 'Lander',
'32017' : 'Lincoln',
'32019' : 'Lyon',
'32021' : 'Mineral',
'32023' : 'Nye',
'32027' : 'Pershing',
'32029' : 'Storey',
'32031' : 'Washoe',
'32033' : 'White Pine',
'32510' : 'City of Carson',
'33001' : 'Belknap',
'33003' : 'Carroll',
'33005' : 'Cheshire',
'33007' : 'Coos',
'33009' : 'Grafton',
'33011' : 'Hillsborough',
'33013' : 'Merrimack',
'33015' : 'Rockingham',
'33017' : 'Strafford',
'33019' : 'Sullivan',
'34001' : 'Atlantic',
'34003' : 'Bergen',
'34005' : 'Burlington',
'34007' : 'Camden',
'34009' : 'Cape May',
'34011' : 'Cumberland',
'34013' : 'Essex',
'34015' : 'Gloucester',
'34017' : 'Hudson',
'34019' : 'Hunterdon',
'34021' : 'Mercer',
'34023' : 'Middlesex',
'34025' : 'Monmouth',
'34027' : 'Morris',
'34029' : 'Ocean',
'34031' : 'Passaic',
'34033' : 'Salem',
'34035' : 'Somerset',
'34037' : 'Sussex',
'34039' : 'Union',
'34041' : 'Warren',
'35001' : 'Bernalillo',
'35003' : 'Catron',
'35005' : 'Chaves',
'35006' : 'Cibola',
'35007' : 'Colfax',
'35009' : 'Curry',
'35011' : 'Debaca',
'35013' : 'Dona Ana',
'35015' : 'Eddy',
'35017' : 'Grant',
'35019' : 'Guadalupe',
'35021' : 'Harding',
'35023' : 'Hidalgo',
'35025' : 'Lea',
'35027' : 'Lincoln',
'35028' : 'Los Alamos',
'35029' : 'Luna',
'35031' : 'Mckinley',
'35033' : 'Mora',
'35035' : 'Otero',
'35037' : 'Quay',
'35039' : 'Rio Arriba',
'35041' : 'Roosevelt',
'35043' : 'Sandoval',
'35045' : 'San Juan',
'35047' : 'San Miguel',
'35049' : 'Santa Fe',
'35051' : 'Sierra',
'35053' : 'Socorro',
'35055' : 'Taos',
'35057' : 'Torrance',
'35059' : 'Union',
'35061' : 'Valencia',
'36001' : 'Albany',
'36003' : 'Allegany',
'36005' : 'Bronx',
'36007' : 'Broome',
'36009' : 'Cattaraugus',
'36011' : 'Cayuga',
'36013' : 'Chautauqua',
'36015' : 'Chemung',
'36017' : 'Chenango',
'36019' : 'Clinton',
'36021' : 'Columbia',
'36023' : 'Cortland',
'36025' : 'Delaware',
'36027' : 'Dutchess',
'36029' : 'Erie',
'36031' : 'Essex',
'36033' : 'Franklin',
'36035' : 'Fulton',
'36037' : 'Genesee',
'36039' : 'Greene',
'36041' : 'Hamilton',
'36043' : 'Herkimer',
'36045' : 'Jefferson',
'36047' : 'Kings',
'36049' : 'Lewis',
'36051' : 'Livingston',
'36053' : 'Madison',
'36055' : 'Monroe',
'36057' : 'Montgomery',
'36059' : 'Nassau',
'36061' : 'New York',
'36063' : 'Niagara',
'36065' : 'Oneida',
'36067' : 'Onondaga',
'36069' : 'Ontario',
'36071' : 'Orange',
'36073' : 'Orleans',
'36075' : 'Oswego',
'36077' : 'Otsego',
'36079' : 'Putnam',
'36081' : 'Queens',
'36083' : 'Rensselaer',
'36085' : 'Richmond',
'36087' : 'Rockland',
'36089' : 'Saint Lawrence',
'36091' : 'Saratoga',
'36093' : 'Schenectady',
'36095' : 'Schoharie',
'36097' : 'Schuyler',
'36099' : 'Seneca',
'36101' : 'Steuben',
'36103' : 'Suffolk',
'36105' : 'Sullivan',
'36107' : 'Tioga',
'36109' : 'Tompkins',
'36111' : 'Ulster',
'36113' : 'Warren',
'36115' : 'Washington',
'36117' : 'Wayne',
'36119' : 'Westchester',
'36121' : 'Wyoming',
'36123' : 'Yates',
'37001' : 'Alamance',
'37003' : 'Alexander',
'37005' : 'Alleghany',
'37007' : 'Anson',
'37009' : 'Ashe',
'37011' : 'Avery',
'37013' : 'Beaufort',
'37015' : 'Bertie',
'37017' : 'Bladen',
'37019' : 'Brunswick',
'37021' : 'Buncombe',
'37023' : 'Burke',
'37025' : 'Cabarrus',
'37027' : 'Caldwell',
'37029' : 'Camden',
'37031' : 'Carteret',
'37033' : 'Caswell',
'37035' : 'Catawba',
'37037' : 'Chatham',
'37039' : 'Cherokee',
'37041' : 'Chowan',
'37043' : 'Clay',
'37045' : 'Cleveland',
'37047' : 'Columbus',
'37049' : 'Craven',
'37051' : 'Cumberland',
'37053' : 'Currituck',
'37055' : 'Dare',
'37057' : 'Davidson',
'37059' : 'Davie',
'37061' : 'Duplin',
'37063' : 'Durham',
'37065' : 'Edgecombe',
'37067' : 'Forsyth',
'37069' : 'Franklin',
'37071' : 'Gaston',
'37073' : 'Gates',
'37075' : 'Graham',
'37077' : 'Granville',
'37079' : 'Greene',
'37081' : 'Guilford',
'37083' : 'Halifax',
'37085' : 'Harnett',
'37087' : 'Haywood',
'37089' : 'Henderson',
'37091' : 'Hertford',
'37093' : 'Hoke',
'37095' : 'Hyde',
'37097' : 'Iredell',
'37099' : 'Jackson',
'37101' : 'Johnston',
'37103' : 'Jones',
'37105' : 'Lee',
'37107' : 'Lenoir',
'37109' : 'Lincoln',
'37111' : 'Mcdowell',
'37113' : 'Macon',
'37115' : 'Madison',
'37117' : 'Martin',
'37119' : 'Mecklenburg',
'37121' : 'Mitchell',
'37123' : 'Montgomery',
'37125' : 'Moore',
'37127' : 'Nash',
'37129' : 'New Hanover',
'37131' : 'Northampton',
'37133' : 'Onslow',
'37135' : 'Orange',
'37137' : 'Pamlico',
'37139' : 'Pasquotank',
'37141' : 'Pender',
'37143' : 'Perquimans',
'37145' : 'Person',
'37147' : 'Pitt',
'37149' : 'Polk',
'37151' : 'Randolph',
'37153' : 'Richmond',
'37155' : 'Robeson',
'37157' : 'Rockingham',
'37159' : 'Rowan',
'37161' : 'Rutherford',
'37163' : 'Sampson',
'37165' : 'Scotland',
'37167' : 'Stanly',
'37169' : 'Stokes',
'37171' : 'Surry',
'37173' : 'Swain',
'37175' : 'Transylvania',
'37177' : 'Tyrrell',
'37179' : 'Union',
'37181' : 'Vance',
'37183' : 'Wake',
'37185' : 'Warren',
'37187' : 'Washington',
'37189' : 'Watauga',
'37191' : 'Wayne',
'37193' : 'Wilkes',
'37195' : 'Wilson',
'37197' : 'Yadkin',
'37199' : 'Yancey',
'38001' : 'Adams',
'38003' : 'Barnes',
'38005' : 'Benson',
'38007' : 'Billings',
'38009' : 'Bottineau',
'38011' : 'Bowman',
'38013' : 'Burke',
'38015' : 'Burleigh',
'38017' : 'Cass',
'38019' : 'Cavalier',
'38021' : 'Dickey',
'38023' : 'Divide',
'38025' : 'Dunn',
'38027' : 'Eddy',
'38029' : 'Emmons',
'38031' : 'Foster',
'38033' : 'Golden Valley',
'38035' : 'Grand Forks',
'38037' : 'Grant',
'38039' : 'Griggs',
'38041' : 'Hettinger',
'38043' : 'Kidder',
'38045' : 'Lamoure',
'38047' : 'Logan',
'38049' : 'Mchenry',
'38051' : 'Mcintosh',
'38053' : 'Mckenzie',
'38055' : 'Mclean',
'38057' : 'Mercer',
'38059' : 'Morton',
'38061' : 'Mountrail',
'38063' : 'Nelson',
'38065' : 'Oliver',
'38067' : 'Pembina',
'38069' : 'Pierce',
'38071' : 'Ramsey',
'38073' : 'Ransom',
'38075' : 'Renville',
'38077' : 'Richland',
'38079' : 'Rolette',
'38081' : 'Sargent',
'38083' : 'Sheridan',
'38085' : 'Sioux',
'38087' : 'Slope',
'38089' : 'Stark',
'38091' : 'Steele',
'38093' : 'Stutsman',
'38095' : 'Towner',
'38097' : 'Traill',
'38099' : 'Walsh',
'38101' : 'Ward',
'38103' : 'Wells',
'38105' : 'Williams',
'39001' : 'Adams',
'39003' : 'Allen',
'39005' : 'Ashland',
'39007' : 'Ashtabula',
'39009' : 'Athens',
'39011' : 'Auglaize',
'39013' : 'Belmont',
'39015' : 'Brown',
'39017' : 'Butler',
'39019' : 'Carroll',
'39021' : 'Champaign',
'39023' : 'Clark',
'39025' : 'Clermont',
'39027' : 'Clinton',
'39029' : 'Columbiana',
'39031' : 'Coshocton',
'39033' : 'Crawford',
'39035' : 'Cuyahoga',
'39037' : 'Darke',
'39039' : 'Defiance',
'39041' : 'Delaware',
'39043' : 'Erie',
'39045' : 'Fairfield',
'39047' : 'Fayette',
'39049' : 'Franklin',
'39051' : 'Fulton',
'39053' : 'Gallia',
'39055' : 'Geauga',
'39057' : 'Greene',
'39059' : 'Guernsey',
'39061' : 'Hamilton',
'39063' : 'Hancock',
'39065' : 'Hardin',
'39067' : 'Harrison',
'39069' : 'Henry',
'39071' : 'Highland',
'39073' : 'Hocking',
'39075' : 'Holmes',
'39077' : 'Huron',
'39079' : 'Jackson',
'39081' : 'Jefferson',
'39083' : 'Knox',
'39085' : 'Lake',
'39087' : 'Lawrence',
'39089' : 'Licking',
'39091' : 'Logan',
'39093' : 'Lorain',
'39095' : 'Lucas',
'39097' : 'Madison',
'39099' : 'Mahoning',
'39101' : 'Marion',
'39103' : 'Medina',
'39105' : 'Meigs',
'39107' : 'Mercer',
'39109' : 'Miami',
'39111' : 'Monroe',
'39113' : 'Montgomery',
'39115' : 'Morgan',
'39117' : 'Morrow',
'39119' : 'Muskingum',
'39121' : 'Noble',
'39123' : 'Ottawa',
'39125' : 'Paulding',
'39127' : 'Perry',
'39129' : 'Pickaway',
'39131' : 'Pike',
'39133' : 'Portage',
'39135' : 'Preble',
'39137' : 'Putnam',
'39139' : 'Richland',
'39141' : 'Ross',
'39143' : 'Sandusky',
'39145' : 'Scioto',
'39147' : 'Seneca',
'39149' : 'Shelby',
'39151' : 'Stark',
'39153' : 'Summit',
'39155' : 'Trumbull',
'39157' : 'Tuscarawas',
'39159' : 'Union',
'39161' : 'Van Wert',
'39163' : 'Vinton',
'39165' : 'Warren',
'39167' : 'Washington',
'39169' : 'Wayne',
'39171' : 'Williams',
'39173' : 'Wood',
'39175' : 'Wyandot',
'40001' : 'Adair',
'40003' : 'Alfalfa',
'40005' : 'Atoka',
'40007' : 'Beaver',
'40009' : 'Beckham',
'40011' : 'Blaine',
'40013' : 'Bryan',
'40015' : 'Caddo',
'40017' : 'Canadian',
'40019' : 'Carter',
'40021' : 'Cherokee',
'40023' : 'Choctaw',
'40025' : 'Cimarron',
'40027' : 'Cleveland',
'40029' : 'Coal',
'40031' : 'Comanche',
'40033' : 'Cotton',
'40035' : 'Craig',
'40037' : 'Creek',
'40039' : 'Custer',
'40041' : 'Delaware',
'40043' : 'Dewey',
'40045' : 'Ellis',
'40047' : 'Garfield',
'40049' : 'Garvin',
'40051' : 'Grady',
'40053' : 'Grant',
'40055' : 'Greer',
'40057' : 'Harmon',
'40059' : 'Harper',
'40061' : 'Haskell',
'40063' : 'Hughes',
'40065' : 'Jackson',
'40067' : 'Jefferson',
'40069' : 'Johnston',
'40071' : 'Kay',
'40073' : 'Kingfisher',
'40075' : 'Kiowa',
'40077' : 'Latimer',
'40079' : 'Le Flore',
'40081' : 'Lincoln',
'40083' : 'Logan',
'40085' : 'Love',
'40087' : 'Mcclain',
'40089' : 'Mccurtain',
'40091' : 'Mcintosh',
'40093' : 'Major',
'40095' : 'Marshall',
'40097' : 'Mayes',
'40099' : 'Murray',
'40101' : 'Muskogee',
'40103' : 'Noble',
'40105' : 'Nowata',
'40107' : 'Okfuskee',
'40109' : 'Oklahoma',
'40111' : 'Okmulgee',
'40113' : 'Osage',
'40115' : 'Ottawa',
'40117' : 'Pawnee',
'40119' : 'Payne',
'40121' : 'Pittsburg',
'40123' : 'Pontotoc',
'40125' : 'Pottawatomie',
'40127' : 'Pushmataha',
'40129' : 'Roger Mills',
'40131' : 'Rogers',
'40133' : 'Seminole',
'40135' : 'Sequoyah',
'40137' : 'Stephens',
'40139' : 'Texas',
'40141' : 'Tillman',
'40143' : 'Tulsa',
'40145' : 'Wagoner',
'40147' : 'Washington',
'40149' : 'Washita',
'40151' : 'Woods',
'40153' : 'Woodward',
'41001' : 'Baker',
'41003' : 'Benton',
'41005' : 'Clackamas',
'41007' : 'Clatsop',
'41009' : 'Columbia',
'41011' : 'Coos',
'41013' : 'Crook',
'41015' : 'Curry',
'41017' : 'Deschutes',
'41019' : 'Douglas',
'41021' : 'Gilliam',
'41023' : 'Grant',
'41025' : 'Harney',
'41027' : 'Hood River',
'41029' : 'Jackson',
'41031' : 'Jefferson',
'41033' : 'Josephine',
'41035' : 'Klamath',
'41037' : 'Lake',
'41039' : 'Lane',
'41041' : 'Lincoln',
'41043' : 'Linn',
'41045' : 'Malheur',
'41047' : 'Marion',
'41049' : 'Morrow',
'41051' : 'Multnomah',
'41053' : 'Polk',
'41055' : 'Sherman',
'41057' : 'Tillamook',
'41059' : 'Umatilla',
'41061' : 'Union',
'41063' : 'Wallowa',
'41065' : 'Wasco',
'41067' : 'Washington',
'41069' : 'Wheeler',
'41071' : 'Yamhill',
'42001' : 'Adams',
'42003' : 'Allegheny',
'42005' : 'Armstrong',
'42007' : 'Beaver',
'42009' : 'Bedford',
'42011' : 'Berks',
'42013' : 'Blair',
'42015' : 'Bradford',
'42017' : 'Bucks',
'42019' : 'Butler',
'42021' : 'Cambria',
'42023' : 'Cameron',
'42025' : 'Carbon',
'42027' : 'Centre',
'42029' : 'Chester',
'42031' : 'Clarion',
'42033' : 'Clearfield',
'42035' : 'Clinton',
'42037' : 'Columbia',
'42039' : 'Crawford',
'42041' : 'Cumberland',
'42043' : 'Dauphin',
'42045' : 'Delaware',
'42047' : 'Elk',
'42049' : 'Erie',
'42051' : 'Fayette',
'42053' : 'Forest',
'42055' : 'Franklin',
'42057' : 'Fulton',
'42059' : 'Greene',
'42061' : 'Huntingdon',
'42063' : 'Indiana',
'42065' : 'Jefferson',
'42067' : 'Juniata',
'42069' : 'Lackawanna',
'42071' : 'Lancaster',
'42073' : 'Lawrence',
'42075' : 'Lebanon',
'42077' : 'Lehigh',
'42079' : 'Luzerne',
'42081' : 'Lycoming',
'42083' : 'Mckean',
'42085' : 'Mercer',
'42087' : 'Mifflin',
'42089' : 'Monroe',
'42091' : 'Montgomery',
'42093' : 'Montour',
'42095' : 'Northampton',
'42097' : 'Northumberland',
'42099' : 'Perry',
'42101' : 'Philadelphia',
'42103' : 'Pike',
'42105' : 'Potter',
'42107' : 'Schuylkill',
'42109' : 'Snyder',
'42111' : 'Somerset',
'42113' : 'Sullivan',
'42115' : 'Susquehanna',
'42117' : 'Tioga',
'42119' : 'Union',
'42121' : 'Venango',
'42123' : 'Warren',
'42125' : 'Washington',
'42127' : 'Wayne',
'42129' : 'Westmoreland',
'42131' : 'Wyoming',
'42133' : 'York',
'44001' : 'Bristol',
'44003' : 'Kent',
'44005' : 'Newport',
'44007' : 'Providence',
'44009' : 'Washington',
'45001' : 'Abbeville',
'45003' : 'Aiken',
'45005' : 'Allendale',
'45007' : 'Anderson',
'45009' : 'Bamberg',
'45011' : 'Barnwell',
'45013' : 'Beaufort',
'45015' : 'Berkeley',
'45017' : 'Calhoun',
'45019' : 'Charleston',
'45021' : 'Cherokee',
'45023' : 'Chester',
'45025' : 'Chesterfield',
'45027' : 'Clarendon',
'45029' : 'Colleton',
'45031' : 'Darlington',
'45033' : 'Dillon',
'45035' : 'Dorchester',
'45037' : 'Edgefield',
'45039' : 'Fairfield',
'45041' : 'Florence',
'45043' : 'Georgetown',
'45045' : 'Greenville',
'45047' : 'Greenwood',
'45049' : 'Hampton',
'45051' : 'Horry',
'45053' : 'Jasper',
'45055' : 'Kershaw',
'45057' : 'Lancaster',
'45059' : 'Laurens',
'45061' : 'Lee',
'45063' : 'Lexington',
'45065' : 'Mccormick',
'45067' : 'Marion',
'45069' : 'Marlboro',
'45071' : 'Newberry',
'45073' : 'Oconee',
'45075' : 'Orangeburg',
'45077' : 'Pickens',
'45079' : 'Richland',
'45081' : 'Saluda',
'45083' : 'Spartanburg',
'45085' : 'Sumter',
'45087' : 'Union',
'45089' : 'Williamsburg',
'45091' : 'York',
'46003' : 'Aurora',
'46005' : 'Beadle',
'46007' : 'Bennett',
'46009' : 'Bon Homme',
'46011' : 'Brookings',
'46013' : 'Brown',
'46015' : 'Brule',
'46017' : 'Buffalo',
'46019' : 'Butte',
'46021' : 'Campbell',
'46023' : 'Charles Mix',
'46025' : 'Clark',
'46027' : 'Clay',
'46029' : 'Codington',
'46031' : 'Corson',
'46033' : 'Custer',
'46035' : 'Davison',
'46037' : 'Day',
'46039' : 'Deuel',
'46041' : 'Dewey',
'46043' : 'Douglas',
'46045' : 'Edmunds',
'46047' : 'Fall River',
'46049' : 'Faulk',
'46051' : 'Grant',
'46053' : 'Gregory',
'46055' : 'Haakon',
'46057' : 'Hamlin',
'46059' : 'Hand',
'46061' : 'Hanson',
'46063' : 'Harding',
'46065' : 'Hughes',
'46067' : 'Hutchinson',
'46069' : 'Hyde',
'46071' : 'Jackson',
'46073' : 'Jerauld',
'46075' : 'Jones',
'46077' : 'Kingsbury',
'46079' : 'Lake',
'46081' : 'Lawrence',
'46083' : 'Lincoln',
'46085' : 'Lyman',
'46087' : 'Mccook',
'46089' : 'Mcpherson',
'46091' : 'Marshall',
'46093' : 'Meade',
'46095' : 'Mellette',
'46097' : 'Miner',
'46099' : 'Minnehaha',
'46101' : 'Moody',
'46102' : 'Oglala Lakota',
'46103' : 'Pennington',
'46105' : 'Perkins',
'46107' : 'Potter',
'46109' : 'Roberts',
'46111' : 'Sanborn',
'46113' : 'Shannon', #deprecated 10/2015, use 46102 Oglala Lakota
'46115' : 'Spink',
'46117' : 'Stanley',
'46119' : 'Sully',
'46121' : 'Todd',
'46123' : 'Tripp',
'46125' : 'Turner',
'46127' : 'Union',
'46129' : 'Walworth',
'46135' : 'Yankton',
'46137' : 'Ziebach',
'47001' : 'Anderson',
'47003' : 'Bedford',
'47005' : 'Benton',
'47007' : 'Bledsoe',
'47009' : 'Blount',
'47011' : 'Bradley',
'47013' : 'Campbell',
'47015' : 'Cannon',
'47017' : 'Carroll',
'47019' : 'Carter',
'47021' : 'Cheatham',
'47023' : 'Chester',
'47025' : 'Claiborne',
'47027' : 'Clay',
'47029' : 'Cocke',
'47031' : 'Coffee',
'47033' : 'Crockett',
'47035' : 'Cumberland',
'47037' : 'Davidson',
'47039' : 'Decatur',
'47041' : 'Dekalb',
'47043' : 'Dickson',
'47045' : 'Dyer',
'47047' : 'Fayette',
'47049' : 'Fentress',
'47051' : 'Franklin',
'47053' : 'Gibson',
'47055' : 'Giles',
'47057' : 'Grainger',
'47059' : 'Greene',
'47061' : 'Grundy',
'47063' : 'Hamblen',
'47065' : 'Hamilton',
'47067' : 'Hancock',
'47069' : 'Hardeman',
'47071' : 'Hardin',
'47073' : 'Hawkins',
'47075' : 'Haywood',
'47077' : 'Henderson',
'47079' : 'Henry',
'47081' : 'Hickman',
'47083' : 'Houston',
'47085' : 'Humphreys',
'47087' : 'Jackson',
'47089' : 'Jefferson',
'47091' : 'Johnson',
'47093' : 'Knox',
'47095' : 'Lake',
'47097' : 'Lauderdale',
'47099' : 'Lawrence',
'47101' : 'Lewis',
'47103' : 'Lincoln',
'47105' : 'Loudon',
'47107' : 'Mcminn',
'47109' : 'Mcnairy',
'47111' : 'Macon',
'47113' : 'Madison',
'47115' : 'Marion',
'47117' : 'Marshall',
'47119' : 'Maury',
'47121' : 'Meigs',
'47123' : 'Monroe',
'47125' : 'Montgomery',
'47127' : 'Moore',
'47129' : 'Morgan',
'47131' : 'Obion',
'47133' : 'Overton',
'47135' : 'Perry',
'47137' : 'Pickett',
'47139' : 'Polk',
'47141' : 'Putnam',
'47143' : 'Rhea',
'47145' : 'Roane',
'47147' : 'Robertson',
'47149' : 'Rutherford',
'47151' : 'Scott',
'47153' : 'Sequatchie',
'47155' : 'Sevier',
'47157' : 'Shelby',
'47159' : 'Smith',
'47161' : 'Stewart',
'47163' : 'Sullivan',
'47165' : 'Sumner',
'47167' : 'Tipton',
'47169' : 'Trousdale',
'47171' : 'Unicoi',
'47173' : 'Union',
'47175' : 'Van Buren',
'47177' : 'Warren',
'47179' : 'Washington',
'47181' : 'Wayne',
'47183' : 'Weakley',
'47185' : 'White',
'47187' : 'Williamson',
'47189' : 'Wilson',
'48001' : 'Anderson',
'48003' : 'Andrews',
'48005' : 'Angelina',
'48007' : 'Aransas',
'48009' : 'Archer',
'48011' : 'Armstrong',
'48013' : 'Atascosa',
'48015' : 'Austin',
'48017' : 'Bailey',
'48019' : 'Bandera',
'48021' : 'Bastrop',
'48023' : 'Baylor',
'48025' : 'Bee',
'48027' : 'Bell',
'48029' : 'Bexar',
'48031' : 'Blanco',
'48033' : 'Borden',
'48035' : 'Bosque',
'48037' : 'Bowie',
'48039' : 'Brazoria',
'48041' : 'Brazos',
'48043' : 'Brewster',
'48045' : 'Briscoe',
'48047' : 'Brooks',
'48049' : 'Brown',
'48051' : 'Burleson',
'48053' : 'Burnet',
'48055' : 'Caldwell',
'48057' : 'Calhoun',
'48059' : 'Callahan',
'48061' : 'Cameron',
'48063' : 'Camp',
'48065' : 'Carson',
'48067' : 'Cass',
'48069' : 'Castro',
'48071' : 'Chambers',
'48073' : 'Cherokee',
'48075' : 'Childress',
'48077' : 'Clay',
'48079' : 'Cochran',
'48081' : 'Coke',
'48083' : 'Coleman',
'48085' : 'Collin',
'48087' : 'Collingsworth',
'48089' : 'Colorado',
'48091' : 'Comal',
'48093' : 'Comanche',
'48095' : 'Concho',
'48097' : 'Cooke',
'48099' : 'Coryell',
'48101' : 'Cottle',
'48103' : 'Crane',
'48105' : 'Crockett',
'48107' : 'Crosby',
'48109' : 'Culberson',
'48111' : 'Dallam',
'48113' : 'Dallas',
'48115' : 'Dawson',
'48117' : 'Deaf Smith',
'48119' : 'Delta',
'48121' : 'Denton',
'48123' : 'Dewitt',
'48125' : 'Dickens',
'48127' : 'Dimmit',
'48129' : 'Donley',
'48131' : 'Duval',
'48133' : 'Eastland',
'48135' : 'Ector',
'48137' : 'Edwards',
'48139' : 'Ellis',
'48141' : 'El Paso',
'48143' : 'Erath',
'48145' : 'Falls',
'48147' : 'Fannin',
'48149' : 'Fayette',
'48151' : 'Fisher',
'48153' : 'Floyd',
'48155' : 'Foard',
'48157' : 'Fort Bend',
'48159' : 'Franklin',
'48161' : 'Freestone',
'48163' : 'Frio',
'48165' : 'Gaines',
'48167' : 'Galveston',
'48169' : 'Garza',
'48171' : 'Gillespie',
'48173' : 'Glasscock',
'48175' : 'Goliad',
'48177' : 'Gonzales',
'48179' : 'Gray',
'48181' : 'Grayson',
'48183' : 'Gregg',
'48185' : 'Grimes',
'48187' : 'Guadalupe',
'48189' : 'Hale',
'48191' : 'Hall',
'48193' : 'Hamilton',
'48195' : 'Hansford',
'48197' : 'Hardeman',
'48199' : 'Hardin',
'48201' : 'Harris',
'48203' : 'Harrison',
'48205' : 'Hartley',
'48207' : 'Haskell',
'48209' : 'Hays',
'48211' : 'Hemphill',
'48213' : 'Henderson',
'48215' : 'Hidalgo',
'48217' : 'Hill',
'48219' : 'Hockley',
'48221' : 'Hood',
'48223' : 'Hopkins',
'48225' : 'Houston',
'48227' : 'Howard',
'48229' : 'Hudspeth',
'48231' : 'Hunt',
'48233' : 'Hutchinson',
'48235' : 'Irion',
'48237' : 'Jack',
'48239' : 'Jackson',
'48241' : 'Jasper',
'48243' : 'Jeff Davis',
'48245' : 'Jefferson',
'48247' : 'Jim Hogg',
'48249' : 'Jim Wells',
'48251' : 'Johnson',
'48253' : 'Jones',
'48255' : 'Karnes',
'48257' : 'Kaufman',
'48259' : 'Kendall',
'48261' : 'Kenedy',
'48263' : 'Kent',
'48265' : 'Kerr',
'48267' : 'Kimble',
'48269' : 'King',
'48271' : 'Kinney',
'48273' : 'Kleberg',
'48275' : 'Knox',
'48277' : 'Lamar',
'48279' : 'Lamb',
'48281' : 'Lampasas',
'48283' : 'La Salle',
'48285' : 'Lavaca',
'48287' : 'Lee',
'48289' : 'Leon',
'48291' : 'Liberty',
'48293' : 'Limestone',
'48295' : 'Lipscomb',
'48297' : 'Live Oak',
'48299' : 'Llano',
'48301' : 'Loving',
'48303' : 'Lubbock',
'48305' : 'Lynn',
'48307' : 'Mcculloch',
'48309' : 'Mclennan',
'48311' : 'Mcmullen',
'48313' : 'Madison',
'48315' : 'Marion',
'48317' : 'Martin',
'48319' : 'Mason',
'48321' : 'Matagorda',
'48323' : 'Maverick',
'48325' : 'Medina',
'48327' : 'Menard',
'48329' : 'Midland',
'48331' : 'Milam',
'48333' : 'Mills',
'48335' : 'Mitchell',
'48337' : 'Montague',
'48339' : 'Montgomery',
'48341' : 'Moore',
'48343' : 'Morris',
'48345' : 'Motley',
'48347' : 'Nacogdoches',
'48349' : 'Navarro',
'48351' : 'Newton',
'48353' : 'Nolan',
'48355' : 'Nueces',
'48357' : 'Ochiltree',
'48359' : 'Oldham',
'48361' : 'Orange',
'48363' : 'Palo Pinto',
'48365' : 'Panola',
'48367' : 'Parker',
'48369' : 'Parmer',
'48371' : 'Pecos',
'48373' : 'Polk',
'48375' : 'Potter',
'48377' : 'Presidio',
'48379' : 'Rains',
'48381' : 'Randall',
'48383' : 'Reagan',
'48385' : 'Real',
'48387' : 'Red River',
'48389' : 'Reeves',
'48391' : 'Refugio',
'48393' : 'Roberts',
'48395' : 'Robertson',
'48397' : 'Rockwall',
'48399' : 'Runnels',
'48401' : 'Rusk',
'48403' : 'Sabine',
'48405' : 'San Augustine',
'48407' : 'San Jacinto',
'48409' : 'San Patricio',
'48411' : 'San Saba',
'48413' : 'Schleicher',
'48415' : 'Scurry',
'48417' : 'Shackelford',
'48419' : 'Shelby',
'48421' : 'Sherman',
'48423' : 'Smith',
'48425' : 'Somervell',
'48427' : 'Starr',
'48429' : 'Stephens',
'48431' : 'Sterling',
'48433' : 'Stonewall',
'48435' : 'Sutton',
'48437' : 'Swisher',
'48439' : 'Tarrant',
'48441' : 'Taylor',
'48443' : 'Terrell',
'48445' : 'Terry',
'48447' : 'Throckmorton',
'48449' : 'Titus',
'48451' : 'Tom Green',
'48453' : 'Travis',
'48455' : 'Trinity',
'48457' : 'Tyler',
'48459' : 'Upshur',
'48461' : 'Upton',
'48463' : 'Uvalde',
'48465' : 'Val Verde',
'48467' : 'Van Zandt',
'48469' : 'Victoria',
'48471' : 'Walker',
'48473' : 'Waller',
'48475' : 'Ward',
'48477' : 'Washington',
'48479' : 'Webb',
'48481' : 'Wharton',
'48483' : 'Wheeler',
'48485' : 'Wichita',
'48487' : 'Wilbarger',
'48489' : 'Willacy',
'48491' : 'Williamson',
'48493' : 'Wilson',
'48495' : 'Winkler',
'48497' : 'Wise',
'48499' : 'Wood',
'48501' : 'Yoakum',
'48503' : 'Young',
'48505' : 'Zapata',
'48507' : 'Zavala',
'49001' : 'Beaver',
'49003' : 'Box Elder',
'49005' : 'Cache',
'49007' : 'Carbon',
'49009' : 'Daggett',
'49011' : 'Davis',
'49013' : 'Duchesne',
'49015' : 'Emery',
'49017' : 'Garfield',
'49019' : 'Grand',
'49021' : 'Iron',
'49023' : 'Juab',
'49025' : 'Kane',
'49027' : 'Millard',
'49029' : 'Morgan',
'49031' : 'Piute',
'49033' : 'Rich',
'49035' : 'Salt Lake',
'49037' : 'San Juan',
'49039' : 'Sanpete',
'49041' : 'Sevier',
'49043' : 'Summit',
'49045' : 'Tooele',
'49047' : 'Uintah',
'49049' : 'Utah',
'49051' : 'Wasatch',
'49053' : 'Washington',
'49055' : 'Wayne',
'49057' : 'Weber',
'50001' : 'Addison',
'50003' : 'Bennington',
'50005' : 'Caledonia',
'50007' : 'Chittenden',
'50009' : 'Essex',
'50011' : 'Franklin',
'50013' : 'Grand Isle',
'50015' : 'Lamoille',
'50017' : 'Orange',
'50019' : 'Orleans',
'50021' : 'Rutland',
'50023' : 'Washington',
'50025' : 'Windham',
'50027' : 'Windsor',
'51001' : 'Accomack',
'51003' : 'Albemarle',
'51005' : 'Alleghany',
'51007' : 'Amelia',
'51009' : 'Amherst',
'51011' : 'Appomattox',
'51013' : 'Arlington',
'51015' : 'Augusta',
'51017' : 'Bath',
'51019' : 'Bedford',
'51021' : 'Bland',
'51023' : 'Botetourt',
'51025' : 'Brunswick',
'51027' : 'Buchanan',
'51029' : 'Buckingham',
'51031' : 'Campbell',
'51033' : 'Caroline',
'51035' : 'Carroll',
'51036' : 'Charles City',
'51037' : 'Charlotte',
'51041' : 'Chesterfield',
'51043' : 'Clarke',
'51045' : 'Craig',
'51047' : 'Culpeper',
'51049' : 'Cumberland',
'51051' : 'Dickenson',
'51053' : 'Dinwiddie',
'51057' : 'Essex',
'51059' : 'Fairfax',
'51061' : 'Fauquier',
'51063' : 'Floyd',
'51065' : 'Fluvanna',
'51067' : 'Franklin',
'51069' : 'Frederick',
'51071' : 'Giles',
'51073' : 'Gloucester',
'51075' : 'Goochland',
'51077' : 'Grayson',
'51079' : 'Greene',
'51081' : 'Greensville',
'51083' : 'Halifax',
'51085' : 'Hanover',
'51087' : 'Henrico',
'51089' : 'Henry',
'51091' : 'Highland',
'51093' : 'Isle of Wight',
'51095' : 'James City',
'51097' : 'King and Queen',
'51099' : 'King George',
'51101' : 'King William',
'51103' : 'Lancaster',
'51105' : 'Lee',
'51107' : 'Loudoun',
'51109' : 'Louisa',
'51111' : 'Lunenburg',
'51113' : 'Madison',
'51115' : 'Mathews',
'51117' : 'Mecklenburg',
'51119' : 'Middlesex',
'51121' : 'Montgomery',
'51125' : 'Nelson',
'51127' : 'New Kent',
'51131' : 'Northampton',
'51133' : 'Northumberland',
'51135' : 'Nottoway',
'51137' : 'Orange',
'51139' : 'Page',
'51141' : 'Patrick',
'51143' : 'Pittsylvania',
'51145' : 'Powhatan',
'51147' : 'Prince Edward',
'51149' : 'Prince George',
'51153' : 'Prince William',
'51155' : 'Pulaski',
'51157' : 'Rappahannock',
'51159' : 'Richmond',
'51161' : 'Roanoke',
'51163' : 'Rockbridge',
'51165' : 'Rockingham',
'51167' : 'Russell',
'51169' : 'Scott',
'51171' : 'Shenandoah',
'51173' : 'Smyth',
'51175' : 'Southampton',
'51177' : 'Spotsylvania',
'51179' : 'Stafford',
'51181' : 'Surry',
'51183' : 'Sussex',
'51185' : 'Tazewell',
'51187' : 'Warren',
'51191' : 'Washington',
'51193' : 'Westmoreland',
'51195' : 'Wise',
'51197' : 'Wythe',
'51199' : 'York',
'51510' : 'City of Alexandria',
'51515' : 'City of Bedford',
'51520' : 'City of Bristol',
'51530' : 'City of Buena Vista',
'51540' : 'City of Charlottesville',
'51550' : 'City of Chesapeake',
'51560' : 'City of Clifton Forge',
'51570' : 'City of Colonial Heights',
'51580' : 'City of Covington',
'51590' : 'City of Danville',
'51595' : 'City of Emporia',
'51600' : 'City of Fairfax',
'51610' : 'City of Falls Church',
'51620' : 'City of Franklin',
'51630' : 'City of Fredericksburg',
'51640' : 'City of Galax',
'51650' : 'City of Hampton',
'51660' : 'City of Harrisonburg',
'51670' : 'City of Hopewell',
'51678' : 'City of Lexington',
'51680' : 'City of Lynchburg',
'51683' : 'City of Manassas',
'51685' : 'City of Manassas Park',
'51690' : 'City of Martinsville',
'51700' : 'City of Newport News',
'51710' : 'City of Norfolk',
'51720' : 'City of Norton',
'51730' : 'City of Petersburg',
'51735' : 'City of Poquoson',
'51740' : 'City of Portsmouth',
'51750' : 'City of Radford',
'51760' : 'City of Richmond',
'51770' : 'City of Roanoke',
'51775' : 'City of Salem',
'51790' : 'City of Staunton',
'51800' : 'City of Suffolk',
'51810' : 'City of Virginia Beach',
'51820' : 'City of Waynesboro',
'51830' : 'City of Williamsburg',
'51840' : 'City of Winchester',
'53001' : 'Adams',
'53003' : 'Asotin',
'53005' : 'Benton',
'53007' : 'Chelan',
'53009' : 'Clallam',
'53011' : 'Clark',
'53013' : 'Columbia',
'53015' : 'Cowlitz',
'53017' : 'Douglas',
'53019' : 'Ferry',
'53021' : 'Franklin',
'53023' : 'Garfield',
'53025' : 'Grant',
'53027' : 'Grays Harbor',
'53029' : 'Island',
'53031' : 'Jefferson',
'53033' : 'King',
'53035' : 'Kitsap',
'53037' : 'Kittitas',
'53039' : 'Klickitat',
'53041' : 'Lewis',
'53043' : 'Lincoln',
'53045' : 'Mason',
'53047' : 'Okanogan',
'53049' : 'Pacific',
'53051' : 'Pend Oreille',
'53053' : 'Pierce',
'53055' : 'San Juan',
'53057' : 'Skagit',
'53059' : 'Skamania',
'53061' : 'Snohomish',
'53063' : 'Spokane',
'53065' : 'Stevens',
'53067' : 'Thurston',
'53069' : 'Wahkiakum',
'53071' : 'Walla Walla',
'53073' : 'Whatcom',
'53075' : 'Whitman',
'53077' : 'Yakima',
'54001' : 'Barbour',
'54003' : 'Berkeley',
'54005' : 'Boone',
'54007' : 'Braxton',
'54009' : 'Brooke',
'54011' : 'Cabell',
'54013' : 'Calhoun',
'54015' : 'Clay',
'54017' : 'Doddridge',
'54019' : 'Fayette',
'54021' : 'Gilmer',
'54023' : 'Grant',
'54025' : 'Greenbrier',
'54027' : 'Hampshire',
'54029' : 'Hancock',
'54031' : 'Hardy',
'54033' : 'Harrison',
'54035' : 'Jackson',
'54037' : 'Jefferson',
'54039' : 'Kanawha',
'54041' : 'Lewis',
'54043' : 'Lincoln',
'54045' : 'Logan',
'54047' : 'McDowell',
'54049' : 'Marion',
'54051' : 'Marshall',
'54053' : 'Mason',
'54055' : 'Mercer',
'54057' : 'Mineral',
'54059' : 'Mingo',
'54061' : 'Monongalia',
'54063' : 'Monroe',
'54065' : 'Morgan',
'54067' : 'Nicholas',
'54069' : 'Ohio',
'54071' : 'Pendleton',
'54073' : 'Pleasants',
'54075' : 'Pocahontas',
'54077' : 'Preston',
'54079' : 'Putnam',
'54081' : 'Raleigh',
'54083' : 'Randolph',
'54085' : 'Ritchie',
'54087' : 'Roane',
'54089' : 'Summers',
'54091' : 'Taylor',
'54093' : 'Tucker',
'54095' : 'Tyler',
'54097' : 'Upshur',
'54099' : 'Wayne',
'54101' : 'Webster',
'54103' : 'Wetzel',
'54105' : 'Wirt',
'54107' : 'Wood',
'54109' : 'Wyoming',
'55001' : 'Adams',
'55003' : 'Ashland',
'55005' : 'Barron',
'55007' : 'Bayfield',
'55009' : 'Brown',
'55011' : 'Buffalo',
'55013' : 'Burnett',
'55015' : 'Calumet',
'55017' : 'Chippewa',
'55019' : 'Clark',
'55021' : 'Columbia',
'55023' : 'Crawford',
'55025' : 'Dane',
'55027' : 'Dodge',
'55029' : 'Door',
'55031' : 'Douglas',
'55033' : 'Dunn',
'55035' : 'Eau Claire',
'55037' : 'Florence',
'55039' : 'Fond du Lac',
'55041' : 'Forest',
'55043' : 'Grant',
'55045' : 'Green',
'55047' : 'Green Lake',
'55049' : 'Iowa',
'55051' : 'Iron',
'55053' : 'Jackson',
'55055' : 'Jefferson',
'55057' : 'Juneau',
'55059' : 'Kenosha',
'55061' : 'Kewaunee',
'55063' : 'La Crosse',
'55065' : 'Lafayette',
'55067' : 'Langlade',
'55069' : 'Lincoln',
'55071' : 'Manitowoc',
'55073' : 'Marathon',
'55075' : 'Marinette',
'55077' : 'Marquette',
'55078' : 'Menominee',
'55079' : 'Milwaukee',
'55081' : 'Monroe',
'55083' : 'Oconto',
'55085' : 'Oneida',
'55087' : 'Outagamie',
'55089' : 'Ozaukee',
'55091' : 'Pepin',
'55093' : 'Pierce',
'55095' : 'Polk',
'55097' : 'Portage',
'55099' : 'Price',
'55101' : 'Racine',
'55103' : 'Richland',
'55105' : 'Rock',
'55107' : 'Rusk',
'55109' : 'Saint Croix',
'55111' : 'Sauk',
'55113' : 'Sawyer',
'55115' : 'Shawano',
'55117' : 'Sheboygan',
'55119' : 'Taylor',
'55121' : 'Trempealeau',
'55123' : 'Vernon',
'55125' : 'Vilas',
'55127' : 'Walworth',
'55129' : 'Washburn',
'55131' : 'Washington',
'55133' : 'Waukesha',
'55135' : 'Waupaca',
'55137' : 'Waushara',
'55139' : 'Winnebago',
'55141' : 'Wood',
'56001' : 'Albany',
'56003' : 'Big Horn',
'56005' : 'Campbell',
'56007' : 'Carbon',
'56009' : 'Converse',
'56011' : 'Crook',
'56013' : 'Fremont',
'56015' : 'Goshen',
'56017' : 'Hot Springs',
'56019' : 'Johnson',
'56021' : 'Laramie',
'56023' : 'Lincoln',
'56025' : 'Natrona',
'56027' : 'Niobrara',
'56029' : 'Park',
'56031' : 'Platte',
'56033' : 'Sheridan',
'56035' : 'Sublette',
'56037' : 'Sweetwater',
'56039' : 'Teton',
'56041' : 'Uinta',
'56043' : 'Washakie',
'56045' : 'Weston',
'60010' : 'Eastern District',
'60020' : 'Manu\'a District',
'60030' : 'Rose Island',
'60040' : 'Swains Island',
'60050' : 'Western District',
'66010' : 'Guam',
'69085' : 'Northern Islands',
'69100' : 'Rota',
'69110' : 'Saipan',
'69120' : 'Tinian',
'72001' : 'Adjuntas',
'72003' : 'Aguada',
'72005' : 'Aguadilla',
'72007' : 'Aguas Buenas',
'72009' : 'Aibonito',
'72011' : 'Anasco',
'72013' : 'Arecibo',
'72015' : 'Arroyo',
'72017' : 'Barceloneta',
'72019' : 'Barranquitas',
'72021' : 'Bayamon',
'72023' : 'Cabo Rojo',
'72025' : 'Caguas',
'72027' : 'Camuy',
'72029' : 'Canovanas',
'72031' : 'Carolina',
'72033' : 'Catano',
'72035' : 'Cayey',
'72037' : 'Ceiba',
'72039' : 'Ciales',
'72041' : 'Cidra',
'72043' : 'Coamo',
'72045' : 'Comerio',
'72047' : 'Corozal',
'72049' : 'Culebra',
'72051' : 'Dorado',
'72053' : 'Fajardo',
'72054' : 'Florida',
'72055' : 'Guanica',
'72057' : 'Guayama',
'72059' : 'Guayanilla',
'72061' : 'Guaynabo',
'72063' : 'Gurabo',
'72065' : 'Hatillo',
'72067' : 'Hormigueros',
'72069' : 'Humacao',
'72071' : 'Isabela',
'72073' : 'Jayuya',
'72075' : 'Juana Diaz',
'72077' : 'Juncos',
'72079' : 'Lajas',
'72081' : 'Lares',
'72083' : 'Las Marias',
'72085' : 'Las Piedras',
'72087' : 'Loiza',
'72089' : 'Luquillo',
'72091' : 'Manati',
'72093' : 'Maricao',
'72095' : 'Maunabo',
'72097' : 'Mayaguez',
'72099' : 'Moca',
'72101' : 'Morovis',
'72103' : 'Naguabo',
'72105' : 'Naranjito',
'72107' : 'Orocovis',
'72109' : 'Patillas',
'72111' : 'Penuelas',
'72113' : 'Ponce',
'72115' : 'Quebradillas',
'72117' : 'Rincon',
'72119' : 'Rio Grande',
'72121' : 'Sabana Grande',
'72123' : 'Salinas',
'72125' : 'San German',
'72127' : 'San Juan',
'72129' : 'San Lorenzo',
'72131' : 'San Sebastian',
'72133' : 'Santa Isabel',
'72135' : 'Toa Alta',
'72137' : 'Toa Baja',
'72139' : 'Trujillo Alto',
'72141' : 'Utuado',
'72143' : 'Vega Alta',
'72145' : 'Vega Baja',
'72147' : 'Vieques',
'72149' : 'Villalba',
'72151' : 'Yabucoa',
'72153' : 'Yauco',
'78010' : 'Saint Croix',
'78020' : 'Saint John',
'78030' : 'Saint Thomas',
'64002' : 'Chuuk*',
'64005' : 'Kosrae',
'64040' : 'Pohnpei*',
'64060' : 'Yap',
'68007' : 'Ailinginae',
'68010' : 'Ailinglaplap',
'68030' : 'Ailuk',
'68040' : 'Arno',
'68050' : 'Aur',
'68060' : 'Bikar',
'68070' : 'Bikini',
'68073' : 'Bokak',
'68080' : 'Ebon',
'68090' : 'Enewetak',
'68100' : 'Erikub',
'68110' : 'Jabat',
'68120' : 'Jaluit',
'68130' : 'Jemo',
'68140' : 'Kili',
'68150' : 'Kwajalein',
'68160' : 'Lae',
'68170' : 'Lib',
'68180' : 'Likiep',
'68190' : 'Majuro',
'68300' : 'Maloelap',
'68310' : 'Mejit',
'68320' : 'Mili',
'68330' : 'Namorik',
'68340' : 'Namu',
'68350' : 'Rongelap',
'68360' : 'Rongrik',
'68385' : 'Toke',
'68390' : 'Ujae',
'68400' : 'Ujelang',
'68410' : 'Utrik',
'68420' : 'Wotho',
'68430' : 'Wotje',
'70002' : 'Aimeliik',
'70004' : 'Airai',
'70010' : 'Angaur',
'70050' : 'Hatobohei*',
'70100' : 'Kayangel',
'70150' : 'Koror',
'70212' : 'Melekeok*',
'70214' : 'Ngaraard',
'70218' : 'Ngarchelong',
'70222' : 'Ngardmau',
'70224' : 'Ngatpang',
'70226' : 'Ngchesar',
'70227' : 'Ngeremlengui*',
'70228' : 'Ngiwal',
'70350' : 'Peleliu',
'70370' : 'Sonsorol',
'74050' : 'Baker Island',
'74100' : 'Howland Island',
'74150' : 'Jarvis Island',
'74200' : 'Johnston Island',
'74250' : 'Kingman Reef',
'74300' : 'Midway Islands',
'74350' : 'Navassa Island',
'74400' : 'Palmyra Atoll',
'74450' : 'Wake Island',
#Marine Locations
'75610' : 'Lake Okeechobee',
'77657' : 'Coastal waters from East Cape Sable to Chokoloskee FL out 20 NM',
'77656' : 'Coastal waters from Chokoloskee to Bonita Beach FL out 20 NM',
'77853' : 'Coastal waters from Englewood to Tarpon Springs FL out 20 NM',
'77830' : 'Tampa Bay waters',
'77250' : 'Coastal waters from Baffin Bay to Port Aransas out 20 NM',
'77255' : 'Coastal waters from Port Aransas to Matagorda Ship Channel out 20 NM',
'77155' : 'Coastal waters from Baffin Bay to Port Mansfield TX out 20 NM',
'77135' : 'Laguna Madre From 5 NM North of Port Mansfield to Baffin Bay TX',
'77150' : 'Coastal waters from Port Mansfield TX to the Rio Grande River out 20 NM',
'77132' : 'Laguna Madre From the Arroyo Colorado to 5 NM North of Port Mansfield TX',
'77130' : 'Laguna Madre From the Port of Brownsville to the Arroyo Colorado',
'75671' : 'Waters from Deerfield Beach to Ocean Reef FL from 20 to 60 NM excluding the territorial waters of Bahamas',
'77676' : 'Waters from Chokoloskee to Bonita Beach FL from 20 to 60 NM',
'77876' : 'Waters from Bonita Beach to Englewood FL out 20 to 60 NM',
'77873' : 'Waters from Englewood to Tarpon Springs FL out 20 to 60 NM',
'77170' : 'Waters from Port Mansfield TX to the Rio Grande River from 20 to 60 NM',
'77175' : 'Waters from Baffin Bay to Port Mansfield TX from 20 to 60 NM',
'77270' : 'Waters from Baffin Bay to Port Aransas from 20 to 60 NM',
'77275' : 'Waters from Port Aransas to Matagorda Ship Channel from 20 to 60 NM',
'77370' : 'Waters from Freeport to Matagorda Ship Channel TX from 20 to 60 NM',
'75712' : 'Coastal Waters of Northern Puerto Rico out 10 NM',
'75725' : 'Coastal Waters of Southern USVI, Vieques, and Eastern Puerto Rico out 10 NM',
'75745' : 'Coastal Waters of Southwestern Puerto Rico out 10 NM',
'75735' : 'Coastal Waters of Southern Puerto Rico out 10 NM',
'75742' : 'Coastal Waters of Northwestern Puerto Rico out 10 NM',
'75741' : 'Mona Passage Southward to 17N',
'75732' : 'Caribbean Waters of Puerto Rico from 10 NM to 17N',
'75651' : 'Coastal waters from Deerfield Beach to Ocean Reef FL out 20 NM',
'59124' : 'Big Island Southeast Waters',
'59123' : 'Big Island Leeward Waters',
'59122' : 'Big Island Windward Waters',
'59119' : 'Maalaea Bay',
'59118' : 'Maui County Leeward Waters',
'59120' : 'Pailolo Channel',
'59116' : 'Kaiwi Channel',
'59115' : 'Oahu Leeward Waters',
'59113' : 'Kauai Channel',
'59114' : 'Oahu Windward Waters',
'59112' : 'Kauai Leeward Waters',
'59111' : 'Kauai Windward Waters',
'59110' : 'Kauai Northwest Waters',
'75710' : 'Atlantic Waters of Puerto Rico and USVI from 10 NM to 19.5N',
'75722' : 'Anegada Passage Southward to 17N',
'75715' : 'Coastal Waters of Northern USVI and Culebra out 10 NM',
'61150' : 'Coastal waters of Tutuila and Aunuu',
'61151' : 'Coastal waters of Manua',
'75650' : 'Coastal waters from Jupiter Inlet to Deerfield Beach FL out 20 NM',
'75630' : 'Biscayne Bay',
'77052' : 'Straits of Florida from Ocean Reef to Craig Key out 20 NM',
'77031' : 'Florida Bay including Barnes Sound, Blackwater Sound, and Buttonwood Sound',
'77053' : 'Straits of Florida from Craig Key to west end of Seven Mile Bridge out 20 NM',
'77054' : 'Straits of Florida from west end of Seven Mile Bridge to south of Halfmoon Shoal out 20 NM',
'77075' : 'Straits of Florida from Halfmoon Shoal to 20 NM west of Dry Tortugas 20 to 60 NM out',
'77073' : 'Straits of Florida from Craig Key to west end of Seven Mile Bridge 20 to 60 NM out',
'77072' : 'Straits of Florida from Ocean Reef to Craig Key 20 to 60 NM out',
'77033' : 'Gulf waters from East Cape Sable to Chokoloskee 20 to 60 NM out and beyond 5 fathoms',
'77042' : 'Hawk Channel from Ocean Reef to Craig Key out to the reef',
'77044' : 'Hawk Channel from west end of Seven Mile Bridge to Halfmoon Shoal out to the reef',
'77074' : 'Straits of Florida from west end of Seven Mile Bridge to south of Halfmoon Shoal 20 to 60 NM out',
'77034' : 'Gulf of Mexico including Dry Tortugas and Rebecca Shoal Channel',
'77055' : 'Straits of Florida from Halfmoon Shoal to 20 NM west of Dry Tortugas out 20 NM',
'77043' : 'Hawk Channel from Craig Key to west end of Seven Mile Bridge out to the reef',
'77035' : 'Gulf of Mexico from West End of Seven Mile Bridge to Halfmoon Shoal out to 5 Fathoms',
'77230' : 'Bays and Waterways from Baffin Bay to Port Aransas',
'65151' : 'Guam Coastal Waters',
'65152' : 'Rota Coastal Waters',
'65153' : 'Tinian Coastal Waters',
'65161' : 'Koror Palau Coastal Waters',
'65171' : 'Yap Coastal Waters',
'65172' : 'Chuuk Coastal Waters',
'65173' : 'Pohnpei Coastal Waters',
'65174' : 'Kosrae Coastal Waters',
'65181' : 'Majuro Coastal Waters',
'65191' : 'Waters out to 40 Nautical Miles',
'73530' : 'Chesapeake Bay north of Pooles Island MD',
'73430' : 'Delaware Bay waters north of East Point NJ to Slaughter Beach DE',
'73431' : 'Delaware Bay waters south of East Point NJ to Slaughter Beach DE',
'73454' : 'Coastal waters from Cape May NJ to Cape Henlopen DE out 20 nm',
'73455' : 'Coastal waters from Cape Henlopen to Fenwick Island DE out 20 nm',
'73650' : 'Coastal waters from Fenwick Island DE to Chincoteague VA out 20 nm',
'73652' : 'Coastal waters from Chincoteague to Parramore Island VA out 20 nm',
'73654' : 'Coastal waters from Parramore Island to Cape Charles Light VA out 20 nm',
'73538' : 'Patapsco River including Baltimore Harbor',
'73531' : 'Chesapeake Bay from Pooles Island to Sandy Point MD',
'73539' : 'Chester River to Queenstown MD',
'73540' : 'Eastern Bay',
'73533' : 'Chesapeake Bay from North Beach to Drum Point MD',
'73534' : 'Chesapeake Bay from Drum Point MD to Smith Point VA',
'73535' : 'Tidal Potomac from Key Bridge to Indian Head MD',
'73536' : 'Tidal Potomac from Indian Head to Cobb Island MD',
'73630' : 'Chesapeake Bay from Smith Point to Windmill Point VA',
'73537' : 'Tidal Potomac from Cobb Island MD to Smith Point VA',
'73631' : 'Chesapeake Bay from Windmill Point to New Point Comfort VA',
'73632' : 'Chesapeake Bay from New Point Comfort to Little Creek VA',
'73532' : 'Chesapeake Bay from Sandy Point to North Beach MD',
'73453' : 'Coastal waters from Great Egg Inlet to Cape May NJ out 20 nm',
'73452' : 'Coastal waters from Little Egg Inlet to Great Egg Inlet NJ out 20 nm',
'73450' : 'Coastal waters from Sandy Hook to Manasquan Inlet NJ out 20 nm',
'73338' : 'New York Harbor',
'73353' : 'Fire Island Inlet NY to Moriches Inlet NY out 20 nm',
'73350' : 'Moriches Inlet NY to Montauk Point NY out 20 nm',
'73256' : 'Coastal Waters from Montauk NY to Marthas Vineyard extending out to 20 nm South of Block Island',
'73340' : 'Peconic and Gardiners Bays',
'73335' : 'Long Island Sound West of New Haven CT/Port Jefferson NY',
'73235' : 'Rhode Island Sound',
'73232' : 'Nantucket Sound',
'73255' : 'Coastal Waters extending out to 25 nm South of Marthas Vineyard and Nantucket',
'73254' : 'Coastal waters from Provincetown MA to Chatham MA to Nantucket MA out 20 nm',
'73231' : 'Cape Cod Bay',
'73230' : 'Boston Harbor',
'73251' : 'Massachusetts Bay and Ipswich Bay',
'73250' : 'Coastal waters east of Ipswich Bay and the Stellwagen Bank National Marine Sanctuary',
'73153' : 'Casco Bay',
'73152' : 'Coastal Waters from Port Clyde, ME to Cape Elizabeth, ME out 25 NM',
'73150' : 'Coastal Waters from Stonington, ME to Port Clyde, ME out 25 NM',
'73051' : 'Coastal Waters from Schoodic Point, ME to Stonington, ME out 25 NM',
'73050' : 'Coastal Waters from Eastport, ME to Schoodic Point, ME out 25 NM',
'73151' : 'Penobscot Bay',
'73633' : 'Currituck Sound',
'75250' : 'Coastal waters from Surf City to Cape Fear NC out 20 nm',
'75254' : 'Coastal waters from Little River Inlet to Murrells Inlet SC out 20 nm',
'75350' : 'Coastal waters from South Santee River to Edisto Beach SC out 20 nm',
'75330' : 'Charleston Harbor',
'75452' : 'Coastal waters from Fernandina Beach to St. Augustine FL out 20 NM',
'75454' : 'Coastal waters from St. Augustine to Flagler Beach FL out 20 NM',
'77765' : 'Coastal waters from Suwannee River to Keaton Beach out 20 NM',
'77730' : 'Apalachee Bay or Coastal Waters From Keaton Beach to Ochlockonee River FL out to 20 NM',
'77755' : 'Coastal Waters From Ochlockonee River to Apalachicola FL out to 20 NM',
'77750' : 'Coastal waters from Apalachicola to Destin FL out 20 NM',
'77530' : 'Lake Pontchartrain and Lake Maurepas',
'77435' : 'Vermilion Bay',
'77452' : 'Coastal waters from Intracoastal City to Cameron LA out 20 NM',
'77432' : 'Calcasieu Lake',
'77450' : 'Coastal waters from Cameron LA to High Island TX out 20 NM',
'77430' : 'Sabine Lake',
'57750' : 'Coastal Waters from San Mateo Point to the Mexican Border and out to 30 nm',
'57565' : 'Coastal Waters from Point Pinos to Point Piedras Blancas California out to 10 nm',
'57576' : 'Waters from Point Pinos to Point Piedras Blancas 10-60 NM',
'57560' : 'Coastal Waters from Pigeon Point to Point Pinos California out to 10 nm',
'57535' : 'Monterey Bay',
'57545' : 'Coastal Waters from Point Reyes to Pigeon Point California out to 10 nm',
'57530' : 'San Pablo Bay, Suisun Bay, the West Delta and the San Francisco Bay north of the Bay Bridge',
'57540' : 'Coastal Waters from Point Arena to Point Reyes California out to 10 nm',
'57570' : 'Waters from Point Arena to Point Reyes 10-60 NM',
'57455' : 'Coastal waters from Cape Mendocino to Pt. Arena CA out 10 nm',
'57475' : 'Waters from Cape Mendocino to Pt. Arena CA from 10 to 60 nm',
'57450' : 'Coastal waters from Pt. St. George to Cape Mendocino CA out 10 nm',
'57410' : 'Humboldt Bay Bar',
'57470' : 'Waters from Pt. St. George to Cape Mendocino CA from 10 to 60 nm',
'57376' : 'Waters from Cape Blanco OR to Pt. St. George CA from 10 to 60 nm',
'57350' : 'Coastal waters from Florence to Cape Blanco OR out 10 nm',
'57370' : 'Waters from Florence to Cape Blanco OR from 10 to 60 nm',
'57255' : 'Coastal waters from Cascade Head to Florence OR out 10 nm',
'57275' : 'Waters from Cascade Head to Florence OR from 10 to 60 nm',
'57210' : 'Columbia River Bar',
'57250' : 'Coastal waters from Cape Shoalwater WA to Cascade Head OR out 10 nm',
'57270' : 'Waters from Cape Shoalwater WA to Cascade Head OR from 10 to 60 nm',
'57156' : 'Coastal Waters From Point Grenville To Cape Shoalwater Out 10 NM',
'57176' : 'Coastal Waters From Point Grenville To Cape Shoalwater 10 To 60 NM',
'57173' : 'Waters From James Island To Point Grenville 10 To 60 NM',
'57150' : 'Coastal Waters From Cape Flattery To James Island Out 10 NM',
'57170' : 'Coastal Waters From Cape Flattery To James Island 10 To 60 NM',
'57130' : 'West Entrance U.S. Waters Strait of Juan De Fuca',
'57131' : 'Central U.S. Waters Strait of Juan De Fuca',
'57132' : 'East Entrance U.S. Waters Strait of Juan De Fuca',
'57134' : 'Admiralty Inlet',
'91144' : 'Two Harbors to Duluth MN',
'91145' : 'Duluth MN to Port Wing WI',
'91143' : 'Silver Bay Harbor to Two Harbors MN',
'91142' : 'Taconite Harbor to Silver Bay Harbor MN',
'91141' : 'Grand Marais to Taconite Harbor MN',
'91140' : 'Grand Portage to Grand Marais MN',
'91146' : 'Port Wing to Sand Island WI',
'91147' : 'Sand Island to Bayfield WI',
'91121' : 'Chequamegon Bay-Bayfield to Oak Point WI',
'91148' : 'Oak Point to Saxon Harbor WI',
'91240' : 'Saxon Harbor WI to Black River MI',
'91241' : 'Black River To Ontonagon MI',
'91242' : 'Ontonagon to Upper Entrance of Portage Canal MI',
'91243' : 'Upper Entrance of Portage Canal to Eagle River MI',
'91244' : 'Eagle River to Manitou Island MI',
'91245' : 'Manitou Island to Point Isabelle MI',
'91246' : 'Point Isabelle to Lower Entrance of Portage Canal MI',
'91247' : 'Lower Entrance of Portage Canal to Huron Islands MI Including Keweenaw and Huron Bays',
'91248' : 'Huron Islands to Marquette MI',
'91249' : 'Marquette to Munising MI',
'91250' : 'Munising to Grand Marais MI',
'91266' : 'Lake Superior East of a line from Manitou Island to Marquette MI and West of a line from Grand Marais MI to the US/Canadian Border Beyond 5 NM from shore',
'91251' : 'Grand Marais to Whitefish Point MI',
'91267' : 'Lake Superior from Grand Marais MI to Whitefish Point MI 5 NM off shore to the US/Canadian border',
'91321' : 'Whitefish Bay (U.S. Portion)/Whitefish Point to Point Iroquois MI',
'91322' : 'St. Marys River Point Iroquois to E. Potagannissing Bay',
'92743' : 'Calumet Harbor IL to Gary IN',
'92742' : 'Northerly Island to Calumet Harbor IL',
'92741' : 'Wilmette Harbor to Northerly Island IL',
'92740' : 'Winthrop Harbor to Wilmette Harbor IL',
'92745' : 'Burns Harbor to Michigan City IN',
'92043' : 'New Buffalo MI to St Joseph MI',
'92779' : 'Lake Michigan from Wilmette Harbor to Michigan City IN 5 NM offshore to Mid Lake',
'92080' : 'Lake Michigan from Michigan City IN to St. Joseph MI 5 NM offshore to Mid Lake',
'92777' : 'Lake Michigan from Winthrop Harbor to Wilmette Harbor IL 5 NM offshore to Mid Lake',
'92878' : 'Lake Michigan from St Joseph to South Haven MI 5 NM offshore to Mid Lake',
'92646' : 'Wind Point WI to Winthrop Harbor IL',
'92645' : 'North Point Light to Wind Point WI',
'92644' : 'Port Washington to North Point Light WI',
'92643' : 'Sheboygan to Port Washington WI',
'92543' : 'Two Rivers to Sheboygan WI',
'92845' : 'South Haven to Holland MI',
'92675' : 'Lake Michigan from Wind Point WI to Winthrop Harbor IL 5 NM offshore to Mid Lake',
'92876' : 'Lake Michigan from South Haven to Holland MI 5 NM offshore to Mid lake',
'92673' : 'Lake Michigan from North Point Light to Wind Point WI 5 NM offshore to Mid Lake',
'92671' : 'Lake Michigan from Port Washington to North Point Light WI 5 NM offshore to Mid Lake',
'92669' : 'Lake Michigan from Sheboygan to Port Washington WI 5 NM offshore to Mid Lake',
'92870' : 'Lake Michigan from Whitehall to Pentwater MI 5 NM offshore to Mid Lake',
'92567' : 'Lake Michigan from Two Rivers to Sheboygan WI 5 NM offshore to Mid Lake',
'92868' : 'Lake Michigan from Pentwater to Manistee MI 5 NM offshore to Mid Lake',
'92542' : 'Sturgeon Bay to Two Rivers WI',
'92565' : 'Lake Michigan from Sturgeon Bay to Two Rivers WI 5 NM offshore to Mid Lake',
'92366' : 'Lake Michigan from Point Betsie to Manistee MI 5 NM offshore to Mid Lake',
'92346' : 'Manistee to Point Betsie MI',
'92744' : 'Gary to Burns Harbor IN',
'92046' : 'Michigan City IN to New Buffalo MI',
'92844' : 'St Joseph to South Haven MI',
'92522' : 'Green Bay south of line from Oconto WI to Little Sturgeon Bay WI',
'92521' : 'Green Bay south of line from Cedar River to Rock Island Passage and north of a line from Oconto WI to Little Sturgeon Bay WI',
'92221' : 'Green Bay North of line from Cedar River MI to Rock Island Passage',
'92541' : 'Rock Island Passage to Sturgeon Bay WI',
'92250' : '5 NM East of a line from Fairport MI to Rock Island Passage',
'92563' : 'Lake Michigan from Rock Island Passage to Sturgeon Bay WI 5 NM offshore to mid lake',
'92248' : 'Seul Choix Point to Point Detour MI',
'92364' : 'Lake Michigan from Charlevoix to Point Betsie MI 5 NM Offshore to mid lake',
'92261' : 'Lake Michigan from Seul Choix Point to Rock Island Passage 5 NM offshore to Mid Lake',
'92362' : 'Lake Michigan South of a line from Seul Choix Point to the Mackinac Bridge and North of a line from Charlevoix MI to South Fox Island 5 NM offshore',
'92344' : 'Sleeping Bear Point to Grand Traverse Light MI',
'92342' : 'Norwood MI to 5 NM West of Mackinac Bridge including Little Traverse Bay',
'93361' : 'Lake Huron from 5 NM east of Mackinac Bridge to Presque Isle Lt to the US/Canadian border beyond 5 NM from shore',
'93362' : 'Lake Huron from Presque Isle Lt. to Sturgeon Point MI 5 NM off shore to US/Canadian border',
'93346' : 'St Ignace to False Detour Channel',
'93363' : 'Lake Huron from Sturgeon Point to Alabaster MI 5 NM off shore to US/Canadian border',
'93349' : 'Sturgeon Pt to Alabaster MI',
'93347' : '5 NM East of Mackinac Bridge to Presque Isle Light MI including Bois Blanc Island',
'93421' : 'Outer Saginaw Bay SW of Alabaster to Port Austin MI to Inner Saginaw Bay',
'93441' : 'Port Austin to Harbor Beach MI',
'93462' : 'Lake Huron from Port Austin to Harbor Beach 5 NM Off Shore to the US/Canadian border',
'93442' : 'Harbor Beach to Port Sanilac MI',
'93463' : 'Lake Huron from Harbor Beach to Port Sanilac 5 NM Off Shore to US/Canadian border',
'93464' : 'Lake Huron from Port Sanilac to Port Huron 5 NM Off Shore to US/Canadian border',
'94460' : 'Lake St. Clair Open Lake (U.S. Portion)',
'94423' : 'Detroit River',
'93443' : 'Port Sanilac to Port Huron MI',
'96444' : 'Michigan Waters of Lake Erie from Detroit River to North Cape MI',
'96142' : 'Maumee Bay to Reno Beach OH',
'96162' : 'Detroit River Lt. to Maumee Bay OH to Reno Beach OH beyond 5 NM off shoreline to US-Canadian border',
'96143' : 'Reno Beach to The Islands OH',
'96144' : 'The Islands to Vermilion OH',
'96163' : 'Reno Beach to The Islands OH beyond 5 NM off shoreline to US-Canadian border',
'96145' : 'Vermilion to Avon Point OH',
'96146' : 'Avon Point to Willowick OH',
'96164' : 'The Islands to Vermilion OH beyond 5 nm off shoreline to US-Canadian border',
'96165' : 'Vermilion to Avon Point OH beyond 5 nm off shoreline to US-Canadian border',
'96166' : 'Avon Point to Willowick OH beyond 5 nm off shoreline to US-Canadian border',
'96147' : 'Willowick to Geneva-on-the-Lake OH',
'96167' : 'Willowick to Geneva-on-the-Lake OH beyond 5 NM off shoreline to US-Canadian border',
'96148' : 'Geneva-on-the-Lake to Conneaut OH',
'96168' : 'Geneva-on-the-Lake to Conneaut OH beyond 5 nm off shoreline to US-Canadian border',
'96149' : 'Conneaut OH to Ripley NY',
'96169' : 'Conneaut OH to Ripley NY beyond 5 nm off shoreline to US-Canadian border',
'96061' : 'Ripley to Buffalo NY extending from 5 NM off shoreline to US-Canadian border',
'97042' : 'Niagara River to Hamlin Beach NY',
'97062' : 'Niagara River to Hamlin Beach NY beyond 5 NM off shoreline to US-Canadian border',
'97043' : 'Hamlin Beach to Sodus Bay NY',
'97063' : 'Hamlin Beach to Sodus Bay NY beyond 5 NM off shoreline to US-Canadian border',
'97044' : 'Sodus Bay to Mexico Bay NY',
'97064' : 'Sodus Bay to Mexico Bay NY beyond 5 NM off shoreline to US-Canadian border',
'97045' : 'Mexico Bay NY to the St. Lawrence River',
'98022' : 'St. Lawrence River above Ogdensburg NY',
'98024' : 'St. Lawrence River from Ogdensburg to St. Regis NY',
'97065' : 'Mexico Bay NY to the St. Lawrence River beyond 5 NM off shoreline to US-Canadian border',
'91162' : 'Lake Superior west of a line from Saxon Harbor WI to Grand Portage MN beyond 5 NM',
'91265' : 'Lake Superior West of Line from Manitou Island to Marquette MI Beyond 5 NM from shore',
'93422' : 'Inner Saginaw Bay SW of Point Au Gres to Bay Port MI',
'75450' : 'Coastal waters from Altamaha Sound to Fernandina Beach FL out 20 NM',
'73052' : 'Intra Coastal Waters from Schoodic Point, ME to Stonington, ME',
'73658' : 'Coastal waters from NC VA border to Currituck Beach Light NC out 20 nm',
'77455' : 'Coastal waters from Lower Atchafalaya River to Intracoastal City LA out 20 NM',
'73170' : 'Waters from Stonington ME to Merrimack River MA from 25 to 40 nm',
'73271' : 'Ocean Waters from Provincetown to Nantucket from 20 to 35 NM offshore',
'73070' : 'Waters from Eastport ME to Stonington (Deer Isle) ME from 25 to 40 nm',
'73273' : 'Ocean Waters from Montauk NY to Marthas Vineyard from 25 to 40 NM offshore',
'73272' : 'Ocean Waters from Marthas Vineyard to Nantucket from 25 to 45 NM offshore',
'73370' : 'Waters from Montauk Point NY to Sandy Hook NJ from 20 to 40 nm',
'73470' : 'Waters from Sandy Hook NJ to Fenwick Island DE from 20 to 40 nm',
'73670' : 'Waters from Fenwick Island DE to Currituck Beach Light NC from 20 to 40 nm',
'75370' : 'Waters from South Santee River SC to Savannah GA extending from 20 nm to 40 nm',
'75470' : 'Waters from Altamaha Sound GA to Fernandina Beach FL from 20 to 60 NM',
'75374' : 'Waters from Savannah GA to Altamaha Sound GA extending from 20 to 60 nm',
'75472' : 'Waters from Fernandina Beach to St. Augustine FL from 20 to 60 NM',
'75474' : 'Waters from St. Augustine to Flagler Beach FL from 20 to 60 NM',
'57775' : 'Waters from San Mateo point to the Mexican Border Extending 30 to 60 nm out including San Clemente Island',
'57310' : 'Coos Bay Bar',
'77534' : 'Lake Borgne',
'77557' : 'Coastal waters from Pascagoula Mississippi to Stake Island out 20 NM',
'57330' : 'Chetco River Bar',
'73656' : 'Coastal Waters from Cape Charles Light to Virginia-North Carolina border out to 20 nm',
'73634' : 'Chesapeake Bay from Little Creek VA to Cape Henry VA including the Chesapeake Bay Bridge Tunnel',
'73154' : 'Coastal Waters from Cape Elizabeth, ME to Merrimack River, MA out 25 NM',
'57356' : 'Coastal waters from Cape Blanco OR to Pt. St. George CA out 10 nm',
'94422' : 'St. Clair River',
'73541' : 'Choptank River to Cambridge MD and the Little Choptank River',
'96040' : 'Ripley to Dunkirk NY',
'77532' : 'Mississippi Sound',
'77536' : 'Chandeleur Sound',
'77538' : 'Breton Sound',
'73451' : 'Coastal waters from Manasquan Inlet to Little Egg Inlet NJ out 20 nm',
'96020' : 'Upper Niagara River and Buffalo Harbor',
'91263' : 'Lake Superior from Saxon Harbor WI to Upper Entrance to Portage Canal MI 5 NM off shore to the US/Canadian border including Isle Royale National Park',
'91264' : 'Lake Superior from Upper Entrance to Portage Canal to Manitou Island MI 5 NM off shore to the US/Canadian Border',
'92846' : 'Holland to Grand Haven MI',
'92847' : 'Grand Haven to Whitehall MI',
'92849' : 'Pentwater to Manistee MI',
'92848' : 'Whitehall to Pentwater MI',
'92874' : 'Lake Michigan from Holland to Grand Haven MI 5 NM offshore to Mid Lake',
'92872' : 'Lake Michigan from Grand Haven to Whitehall MI 5 NM offshore to Mid Lake',
'92341' : 'Seul Choix Point to 5 NM West of Mackinac Bridge',
'93345' : 'Straits of Mackinac within 5 nm of Mackinac Bridge including Mackinac Island',
'92345' : 'Point Betsie to Sleeping Bear Point MI',
'92323' : 'Grand Traverse Bay south of a line Grand Traverse Light to Norwood MI',
'93348' : 'Presque Isle Light to Sturgeon Pt MI Including Thunder Bay National Marine Sanctuary',
'73543' : 'Tangier Sound and the inland waters surrounding Bloodsworth Island',
'73330' : 'Long Island Sound East of New Haven CT/Port Jefferson NY',
'57110' : 'Grays Harbor Bar',
'96041' : 'Dunkirk to Buffalo NY',
'75352' : 'Coastal waters from Edisto Beach SC to Savannah GA out 20 nm',
'75354' : 'Coastal waters from Savannah GA to Altamaha Sound GA out 20 nm ...including Grays Reef National Marine Sanctuary',
'77850' : 'Coastal waters from Tarpon Springs to Suwannee River FL out 20 NM',
'77355' : 'Coastal waters from High Island to Freeport TX out 20 NM',
'77350' : 'Coastal waters from Freeport to Matagorda Ship Channel TX out 20 NM',
'77550' : 'Coastal Waters from Port Fourchon LA to Lower Atchafalaya River LA out 20 nm',
'77770' : 'Waters from Apalachicola to Destin FL from 20 to 60 NM',
'77775' : 'Waters from Suwannee River to Apalachicola FL from 20 to 60 NM',
'77870' : 'Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM',
'77575' : 'Coastal Waters from Stake Island LA to Southwest Pass of the Mississippi River from 20 to 60 nm',
'77472' : 'Waters from Intracoastal City to Cameron LA from 20 to 60 NM',
'77475' : 'Waters from Lower Atchafalaya River to Intracoastal City LA from 20 to 60 NM',
'77570' : 'Coastal waters from Port Fourchon Louisiana to Lower Atchafalaya River LA from 20 to 60 NM',
'77375' : 'Waters from High Island to Freeport TX from 20 to 60 NM',
'77470' : 'Waters from Cameron LA to High Island TX from 20 to 60 NM',
'77577' : 'Coastal waters from Pascagoula Mississippi to Stake Island Louisiana out 20 to 60 NM',
'77572' : 'Coastal waters from Southwest Pass of the Mississippi River to Port Fourchon Louisiana from 20 to 60 NM',
'77555' : 'Coastal Waters from Boothville LA to Southwest Pass of the Mississippi River out 20 nm',
'77552' : 'Coastal waters from the Southwest Pass of the Mississippi River to Port Fourchon Louisiana out 20 NM',
'65154' : 'Saipan Coastal Waters',
'59121' : 'Alenuihaha Channel',
'59117' : 'Maui County Windward Waters',
'75256' : 'Coastal waters from Murrells Inlet to South Santee River SC out 20 nm',
'97030' : 'Lower Niagara River',
'77330' : 'Matagorda Bay',
'73542' : 'Patuxent River to Broomes Island MD',
'57153' : 'Coastal Waters From James Island To Point Grenville Out 10 Nm',
'73355' : 'Sandy Hook NJ to Fire Island Inlet NY out 20 nm',
'73345' : 'South Shore Bays from Jones Inlet through Shinnecock Bay',
'73636' : 'York River',
'77235' : 'Bays and Waterways from Port Aransas to Port O\'Connor',
'73635' : 'Rappahannock River from Urbanna to Windmill Point',
'75670' : 'Waters from Jupiter Inlet to Deerfield Beach FL from 20 to 60 NM',
'73638' : 'James River from James River Bridge to Hampton Roads Bridge-Tunnel',
'57135' : 'Puget Sound and Hood Canal',
'57133' : 'Northern Inland Waters Including The San Juan Islands',
'77335' : 'Galveston Bay',
'77655' : 'Coastal waters from Destin to Pensacola FL out 20 NM',
'77650' : 'Coastal waters from Pensacola FL to Pascagoula MS out 20 NM',
'77670' : 'Waters from Pensacola FL to Pascagoula MS from 20 to 60 NM',
'77675' : 'Waters from Destin to Pensacola FL from 20 to 60 NM',
'77631' : 'South Mobile Bay',
'77632' : 'Mississippi Sound',
'77634' : 'Pensacola Bay Area',
'77635' : 'Choctawhatchee Bay',
'77630' : 'North Mobile Bay',
'57655' : 'Inner waters from Point Mugu to San Mateo Pt. CA including Santa Catalina and Anacapa Islands',
'57650' : 'East Santa Barbara Channel from Pt. Conception to Pt. Mugu CA including Santa Cruz Island',
'57676' : 'Outer waters from Santa Cruz Island to San Clemente Island to 60 NM offshore including San Nicolas and Santa Barbara Islands',
'57673' : 'Waters from Pt. Sal to Santa Cruz Island CA and westward 60 nm including San Miguel and Santa Rosa Islands',
'57645' : 'Point Piedras Blancas to Point Sal westward out to 10 NM',
'77032' : 'Bayside and Gulf side from Craig Key to West End of Seven Mile Bridge',
'75252' : 'Coastal waters from Cape Fear NC to Little River Inlet SC out 20 nm',
'73637' : 'James River from Jamestown to the James River Bridge',
'58245' : 'Flaxman Island to Demarcation Point',
'58240' : 'Cape Halkett to Flaxman Island',
'58235' : 'Point Franklin to Cape Halkett',
'58230' : 'Cape Beaufort to Point Franklin',
'58225' : 'Cape Thompson to Cape Beaufort',
'58220' : 'Wales to Cape Thompson',
'58215' : 'Kotzebue Sound',
'58200' : 'Norton Sound',
'58210' : 'Dall Point to Wales',
'58180' : 'Kuskokwim Delta and Etolin Strait',
'58170' : 'Cape Sarichef to Nikolski Bering Side',
'58179' : 'Pribilof Islands Nearshore Waters',
'58155' : 'Castle Cape to Cape Sarichef',
'58150' : 'Sitkinak to Castle Cape',
'58138' : 'Shelikof Strait',
'58132' : 'Shuyak Island To Sitkinak',
'58139' : 'Cook Inlet Kalgin Island to Point Bede',
'58125' : 'Prince William Sound',
'58128' : 'Valdez Arm',
'58127' : 'Valdez Narrows',
'58126' : 'Port of Valdez',
'58052' : 'Icy Cape to Cape Suckling',
'58051' : 'Cape Fairweather to Icy Cape',
'58053' : 'Yakutat Bay',
'58022' : 'Cross Sound',
'58042' : 'Cape Decision to Cape Edgecumbe',
'58041' : 'Dixon Entrance to Cape Decision',
'58033' : 'Southern Chatham Strait',
'58035' : 'Sumner Strait',
'58034' : 'Frederick Sound',
'58031' : 'Stephens Passage',
'58032' : 'Northern Chatham Strait',
'58021' : 'Icy Strait',
'58011' : 'Glacier Bay',
'58013' : 'Southern Lynn Canal',
'58012' : 'Northern Lynn Canal',
'58043' : 'Southeast Alaska Outside Waters From Cape Edgecumbe to Cape Fairweather',
'58036' : 'Clarence Strait',
'58129' : 'Western Prince William Sound',
'58136' : 'Chiniak Bay',
'58137' : 'Marmot Bay',
'58119' : 'Cape Suckling to Cape Cleare',
'58121' : 'Resurrection Bay',
'58141' : 'Kachemak Bay',
'58131' : 'Barren Islands East',
'58171' : 'Unalaska Bay',
'58140' : 'Cook Inlet North Kalgin Island',
'58130' : 'West of Barren Islands Including Kamishak Bay',
'58160' : 'Bristol Bay',
'58181' : 'North and West of Nunivak Island',
'58174' : 'Nikolski to Seguam Pacific Side',
'58173' : 'Nikolski to Seguam Bering Side',
'58176' : 'Seguam to Adak Pacific Side',
'58175' : 'Seguam to Adak Bering Side',
'58172' : 'Cape Sarichef to Nikolski Pacific Side',
'58165' : 'Port Heiden to Cape Sarichef',
'58120' : 'Cape Cleare to Gore Point',
'58178' : 'Kiska to Attu',
'58177' : 'Adak to Kiska',
'58185' : 'St Matthew Island Waters',
'77856' : 'Coastal waters from Bonita Beach to Englewood FL out 20 NM',
'77836' : 'Charlotte Harbor and Pine Island Sound',
'57575' : 'Waters from Pigeon Point to Point Pinos 10-60 NM',
'57571' : 'Waters from Point Reyes to Pigeon Point 10-60 NM',
'57531' : 'San Francisco Bay South of the Bay Bridge',
'61152' : 'Coastal waters of Swain\'s Island',
'75555' : 'Sebastian Inlet to Jupiter Inlet 0-20 nm',
'75575' : 'Sebastian Inlet to Jupiter Inlet 20-60 nm',
'75570' : 'Flagler Beach to Volusia-Brevard County Line 20-60 nm',
'75550' : 'Flagler Beach to Volusia-Brevard County Line 0-20 nm',
'75552' : 'Volusia-Brevard County Line to Sebastian Inlet 0-20 nm',
'75572' : 'Volusia-Brevard County Line to Sebastian Inlet 20-60 nm',
'77633' : 'Perdido Bay Area',
'75152' : 'S of Oregon Inlet NC to Cape Hatteras NC out to 20 nm',
'75156' : 'S of Ocracoke Inlet NC to Cape Lookout NC out to 20 nm',
'75154' : 'S of Cape Hatteras NC to Ocracoke Inlet NC out to 20 nm',
'75158' : 'S of Cape Lookout NC to Surf City NC out to 20 nm',
'75170' : 'Waters from Currituck Beach Light to Surf City NC from 20 to 40 nm',
'75150' : 'S of Currituck Beach Light NC to Oregon Inlet NC out to 20 nm',
'75130' : 'Albemarle Sound',
'75131' : 'Alligator River',
'75136' : 'Pamlico and Pungo Rivers',
'75137' : 'Neuse and Bay Rivers',
'57670' : 'Point Piedras Blancas to Point Sal from 10 to 60 NM',
'75270' : 'Waters from Surf City NC to South Santee River SC from 20 to 40 nm',
'75135' : 'Pamlico Sound',
'73270' : 'Ocean Waters from the Merrimack River to Plymouth from 40 to 60 NM offshore',
'73234' : 'Buzzards Bay',
'73233' : 'Vineyard Sound',
'73236' : 'Narragansett Bay',
'73237' : 'Block Island Sound',
'75031' : 'Caribbean from 11N to 15N between 72W and 80W including Colombia Basin',
'75013' : 'Caribbean N of 18N between 76W and 85W including Cayman Basin',
'75037' : 'Tropical N Atlantic from 7N to 15N between 55W and 60W',
'75121' : 'Atlantic from 22N to 27N between 65W and 70W',
'75011' : 'Caribbean N of 18N W of 85W including Yucatan Basin',
'75017' : 'Gulf of Honduras',
'75019' : 'Caribbean from 15N to 18N between 80W and 85W',
'75021' : 'Caribbean from 15N to 18N between 72W and 80W',
'75015' : 'Caribbean approaches to the Windward Passage',
'75039' : 'SW Caribbean S of 11N including the approaches to the Panama Canal',
'75029' : 'W Central Caribbean from 11N to 15N W of 80W',
'75023' : 'Caribbean N of 15N between 64W and 72W',
'75027' : 'Tropical N Atlantic from 15N to 19N between 55W and 60W',
'75113' : 'Atlantic from 27N to 31N between 70W and 77W',
'75125' : 'Atlantic S of 22N between 65W and 70W including Puerto Rico Trench',
'75123' : 'Atlantic S of 22N W of 70W including approaches to the Windward Passage',
'75025' : 'Offshore Waters Leeward Islands',
'75035' : 'Offshore Waters Windward Islands including Trinidad and Tobago',
'75117' : 'Bahamas including Cay Sal Bank',
'75033' : 'Caribbean S of 15N between 64W and 72W including Venezuela Basin',
'75127' : 'Atlantic from 19N to 22N between 55W and 65W',
'75119' : 'Atlantic from 22N to 27N E of Bahamas to 70W',
'75115' : 'Atlantic from 27N to 31N between 65W and 70W',
'75111' : 'Atlantic from 27N to 31N W of 77W',
'73805' : 'Georges Bank between Cape Cod and 68W north of 1000 FM',
'73810' : 'South of New England between the Great South Channel and Montauk Point to 1000 FM',
'73820' : 'Hudson Canyon to Baltimore Canyon to 1000 FM',
'73925' : 'Baltimore Canyon to Hatteras Canyon between 100 NM and 250 NM offshore',
'57900' : 'Cape Flattery to Cape Shoalwater between 150 NM and 250 NM offshore',
'57915' : 'Florence, OR to Point St. George between 150 NM and 250 NM offshore',
'57920' : 'Point St. George to Point Arena between 150 NM and 250 NM offshore',
'57930' : 'Pigeon Point to Point Piedras Blancas between 150 NM and 250 NM offshore',
'57940' : 'Santa Cruz Island, CA to 120W between 150 NM and 250 NM offshore',
'77019' : 'Central Gulf from 22N to 26N between 87W and 94W',
'77015' : 'NE Gulf N of 25N E of 87W',
'77013' : 'N Central Gulf including Flower Garden Banks Marine Sanctuary',
'77011' : 'NW Gulf including Stetson Bank',
'77025' : 'E Bay of Campeche including Campeche Bank',
'77017' : 'W Central Gulf from 22N to 26N W of 94W',
'77021' : 'E Gulf from 22N to 25N E of 87W including Straits of Florida',
'77023' : 'SW Gulf S of 22N W of 94W',
'58510' : 'Eastern US Arctic Offshore',
'58505' : 'Central US Arctic Offshore',
'58500' : 'Western US Arctic Offshore',
'58310' : 'Gulf of Alaska North of 55 Degrees North and East of 144W',
'58411' : 'Bering Sea Offshore West of 180 and East of the International Date Line',
'58351' : 'Gulf of Alaska Offshore North of 57N and West of 144W',
'58352' : 'Gulf of Alaska Offshore South of 57N North of 55N and West of 144W',
'58413' : 'Bering Sea Offshore 171W to 180 and South of 56N',
'58414' : 'Bering Sea Offshore East of 171W',
'58412' : 'Bering Sea Offshore 171W to 180 and North of 56N',
'59180' : 'Offshore Waters Within 240 nm of Honolulu',
'73800' : 'Gulf of Maine to the Hague Line',
'73900' : 'Georges Bank between 68W and the Hague Line',
'73815' : 'South of Long Island between Montauk Point and Sandy Hook to 1000 FM',
'73905' : 'East of 69W to the Hague Line between 1000 FM and 39N',
'73910' : 'East of 69W and south of 39N to 250 NM offshore',
'73920' : 'Baltimore Canyon to 69W east of 1000 FM and south of 38.5N to 250 NM offshore',
'73915' : 'Between 1000 FM and 38.5N west of 69W',
'73825' : 'Baltimore Canyon to Cape Charles Light to 100 NM offshore',
'73828' : 'Cape Charles Light to Currituck Beach Light to 100 NM offshore',
'73833' : 'Cape Hatteras to Cape Fear to 100 NM Offshore',
'73830' : 'Currituck Beach Light to Cape Hatteras to 100 NM offshore',
'73930' : 'Hatteras Canyon to Cape Fear between 100 NM and 250 NM offshore',
'73835' : 'Cape Fear to 31N to 1000 FM',
'73935' : 'Cape Fear to 31N east of 1000 FM to 250 NM offshore',
'57840' : 'Santa Cruz Island, CA to San Clemente Island, CA between 60 NM and 150 NM offshore',
'57835' : 'Point Piedras Blancas to Santa Cruz Island, CA between 60 NM and 150 NM offshore',
'57935' : 'Point Piedras Blancas to Santa Cruz Island, CA between 150 NM and 250 NM offshore',
'57800' : 'Cape Flattery to Cape Shoalwater between 60 NM and 150 NM offshore',
'57905' : 'Cape Shoalwater to Cape Lookout between 150 NM and 250 NM offshore',
'57805' : 'Cape Shoalwater to Cape Lookout between 60 NM and 150 NM offshore',
'57910' : 'Cape Lookout to Florence, OR between 150 NM and 250 NM offshore',
'57810' : 'Cape Lookout to Florence, OR between 60 NM and 150 NM offshore',
'57815' : 'Florence, OR to Point St. George between 60 NM and 150 NM offshore',
'57820' : 'Point St. George to Point Arena between 60 NM and 150 NM offshore',
'57925' : 'Point Arena to Pigeon Point between 150 NM and 250 NM offshore',
'57825' : 'Point Arena to Pigeon Point between 60 NM and 150 NM offshore',
'57830' : 'Pigeon Point to Point Piedras Blancas between 60 NM and 150 NM offshore',
'57945' : 'San Clemente Island, CA to Guadalupe Island from 60 NM offshore west to 120W',
'XXXXX' : 'TEST',
}
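
# --- Illustrative usage (a minimal sketch, not part of the original tables) ---
# The dictionaries in this module map SAME/FIPS location codes (as strings) to
# human-readable place names: US entries above, Canadian entries below.
# `lookup_location` is a hypothetical helper sketching the lookup an EAS/SAME
# decoder would typically perform against these tables; the function name and
# fallback text are assumptions for illustration and are not referenced
# elsewhere in this module.
def lookup_location(code, table):
    """Return the place name for a SAME location code, or a fallback string."""
    return table.get(code, 'Unknown location (%s)' % code)
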
CA_SAME_CODE={
'LOCATION' : 'CA',
#Nova Scotia
'11100' : 'Kings County',
'11200' : 'Annapolis County',
'11300' : 'Digby County',
'11400' : 'Lunenburg County',
'11500' : 'Queens County',
'11600' : 'Shelburne County',
'11700' : 'Yarmouth County',
'12100' : 'Cumberland County - Minas Shore',
'12200' : 'Cumberland County North and Cobequid Pass',
'12300' : 'Colchester County - Cobequid Bay',
'12400' : 'Colchester County North',
'12500' : 'Hants County',
'12600' : 'Colchester County - Truro and south',
'12700' : 'Halifax Metro and Halifax County West',
'12800' : 'Halifax County - east of Porters Lake',
'13100' : 'Pictou County',
'13200' : 'Guysborough County',
'13300' : 'Antigonish County',
'14100' : 'Richmond County',
'14200' : 'Inverness County - south of Mabou',
'14300' : 'Inverness County - Mabou and north',
'14400' : 'Victoria County',
'14500' : 'Sydney Metro and Cape Breton County',
#New Brunswick
'15100' : 'Fredericton and southern York County',
'15200' : 'Oromocto and Sunbury County',
'15300' : 'Grand Lake and Queens County',
'15400' : 'Sussex/Kennebecasis Valley and Kings County',
'15500' : 'Saint John and County',
'15600' : 'St. Stephen and northern Charlotte County',
'15700' : 'Grand Manan and coastal Charlotte County',
'16100' : 'Woodstock and Carleton County',
'16200' : 'Stanley-Doaktown-Blackville Area',
'16300' : 'Kent County',
'16400' : 'Moncton and southeast New Brunswick',
'16500' : 'Kouchibouguac National Park',
'16600' : 'Fundy National Park',
'17100' : 'Edmundston and Madawaska County',
'17200' : 'Campbellton and Restigouche County',
#sub-regions
'17210' : 'Western half of Restigouche County',
'17220' : 'Campbellton and eastern half of Restigouche County',
'17300' : 'Grand Falls and Victoria County',
'17400' : 'Mount Carleton-Renous Highway',
'17500' : 'Bathurst and Chaleur Region',
'17600' : 'Miramichi and Area',
'17700' : 'Acadian Peninsula',
#Prince Edward Island
'18100' : 'Kings County',
'18200' : 'Prince County',
'18300' : 'Queens County',
#Newfoundland and Labrador
'21100' : 'Avalon Peninsula North',
'21200' : 'Avalon Peninsula Southeast',
'21300' : 'St. John\'s and vicinity',
#sub-regions
'21310' : 'North of La Manche',
'21320' : 'La Manche and south',
'21400' : 'Avalon Peninsula Southwest',
'22100' : 'Burin Peninsula',
'22200' : 'Connaigre',
'22300' : 'Burgeo to Ramea',
'22400' : 'Channel-Port aux Basques and vicinity',
#sub-regions
'22410' : 'South Coast',
'22420' : 'West Coast',
'23100' : 'Corner Brook and vicinity',
'23200' : 'Deer Lake - Humber Valley',
'23300' : 'Gros Morne',
'23400' : 'Bay St. George',
'24100' : 'Buchans and the Interior',
'24200' : 'Grand Falls-Windsor and vicinity',
'25100' : 'Bay of Exploits',
'25200' : 'Bonavista North',
'25300' : 'Bonavista Peninsula',
'25400' : 'Clarenville and vicinity',
'25500' : 'Gander and vicinity',
'25600' : 'Green Bay - White Bay',
'25700' : 'Terra Nova',
'26100' : 'Northern Peninsula East',
'26200' : 'Parson\'s Pond - Hawke\'s Bay',
'26300' : 'Port Saunders and the Straits',
'27100' : 'Red Bay to L\'Anse-au-Clair',
'27200' : 'Norman Bay to Lodge Bay',
'27300' : 'Cartwright to Black Tickle',
'27400' : 'Rigolet and vicinity',
'27500' : 'Postville - Makkovik',
'27600' : 'Hopedale and vicinity',
'27700' : 'Nain and vicinity',
'28100' : 'Upper Lake Melville',
'28200' : 'Eagle River',
'28300' : 'Churchill Valley',
'29100' : 'Churchill Falls and vicinity',
'29200' : 'Labrador City and Wabush',
#Quebec
'31100' : 'Gatineau',
'31200' : 'Waskaganish',
'31300' : 'Matagami',
#sub-regions
'31310' : 'Joutel - Matagami area',
'31320' : 'Miquelon - Desmaraisville area',
'31400' : 'Abitibi',
#sub-regions
'31410' : 'La Sarre area',
'31420' : 'Amos area',
'31430' : 'Lebel-sur-Quevillon area',
'31440' : 'Senneterre area',
'31450' : 'Rouyn area',
'31460' : 'Malartic area',
'31470' : 'Val d\'Or - Louvicourt area',
'31500' : 'Temiscamingue',
#sub-regions
'31510' : 'Ville-Marie area',
'31520' : 'Temiscaming area',
'31600' : 'Reserve Faunique La Verendrye',
#sub-regions
'31610' : 'Dorval Lodge area',
'31620' : 'Le Domaine area',
'31700' : 'Pontiac',
#sub-regions
'31710' : 'Rapides-des-Joachims area',
'31720' : 'Fort William - Shawville area',
'31800' : 'Mont-Laurier',
#sub-regions
'31810' : 'Mont-Laurier area',
'31820' : 'La Minerve - Rouge River area',
'31900' : 'Upper Gatineau-Lievre-Papineau',
#sub-regions
'31910' : 'Low - Wakefield area',
'31920' : 'Maniwaki - Gracefield area',
'31930' : 'Papineauville - Cheneville area',
'31940' : 'Papineau-Labelle Reserve area',
'31950' : 'Lievre River area',
'32100' : 'Parc du Mont-Tremblant - Saint-Michel-des-Saints',
#sub-regions
'32110' : 'Saint-Michel-des-Saints area',
'32120' : 'Saint-Donat - Parc du Mont-Tremblant area',
'32200' : 'Laurentides',
#sub-regions
'32210' : 'Mont-Tremblant - Sainte-Agathe area',
'32220' : 'Sainte-Adele - Saint-Sauveur area',
'32300' : 'Lachute-Saint-Jerome',
#sub-regions
'32310' : 'Saint-Jerome area',
'32320' : 'Lachute area',
'32330' : 'Saint-Eustache area',
'32400' : 'Montreal Metropolitain - Laval',
#sub-regions
'32410' : 'Longueuil - Varennes area',
'32420' : 'Laval area',
'32430' : 'Montreal Island area',
'32440' : 'Chateauguay - La Prairie area',
'32500' : 'Vaudreuil - Soulanges - Huntingdon',
#sub-regions
'32510' : 'Soulanges area',
'32520' : 'Vaudreuil area',
'32530' : 'Saint-Remi area',
'32540' : 'Valleyfield - Beauharnois area',
'32550' : 'Hemmingford area',
'32560' : 'Huntingdon area',
'32600' : 'Lanaudiere',
#sub-regions
'32610' : 'Berthierville - Saint-Gabriel area',
'32620' : 'Rawdon - Joliette area',
'32630' : 'Mascouche area',
'32700' : 'Mauricie',
#sub-regions
'32710' : 'Louiseville area',
'32720' : 'Matawin - Mauricie National Park area',
'32730' : 'Lac-aux-Sables area',
'32740' : 'Shawinigan area',
'32750' : 'Sainte-Anne-de-la-Perade area',
'32760' : 'Trois-Rivieres area',
'32800' : 'Quebec',
#sub-regions
'32811' : 'Portneuf area',
'32812' : 'Valcartier - Stoneham area',
'32813' : 'Quebec area',
'32814' : 'Cote-de-Beaupre - L\'Ile d\'Orleans area',
'32821' : 'Bellechasse area',
'32822' : 'Levis area',
'32823' : 'Saint-Lambert area',
'32824' : 'Lotbiniere area',
'33100' : 'James-Bay and La-Grande-Riviere',
'33200' : 'LG-Quatre - Laforge and Fontanges',
'33300' : 'Schefferville',
'33400' : 'Fermont',
'34100' : 'Chibougamau',
'34200' : 'Parent-Reservoir Gouin',
#sub-regions
'34210' : 'Gouin Reservoir area',
'34220' : 'Parent area',
'34300' : 'Lac-Saint-Jean',
#sub-regions
'34310' : 'Ashuapmushuan Wildlife Reserve area',
'34320' : 'Normandin - Peribonka area',
'34330' : 'Alma - Desbiens area',
'34340' : 'Saint-Felicien - Roberval area',
'34400' : 'La Tuque',
#sub-regions
'34410' : 'Lake Bouchette area',
'34420' : 'La Tuque area',
'34500' : 'Riviere Manicouagan',
#sub-regions
'34510' : 'Manic-5 area',
'34520' : 'Manic-3 area',
'34530' : 'Chutes-des-Passes - Pipmuacan Reservoir area',
'34540' : 'Labrieville area',
'34600' : 'Les Escoumins-Forestville',
#sub-regions
'34610' : 'Forestville area',
'34620' : 'Les Escoumins area',
'34700' : 'Saguenay',
#sub-regions
'34710' : 'Falardeau - Mont-Valin area',
'34720' : 'Sainte-Rose-du-Nord area',
'34730' : 'Ville de Saguenay area',
'34740' : 'Riviere-eternite - Petit-Saguenay area',
'34750' : 'Tadoussac - Baie-Sainte-Catherine area',
'34800' : 'Charlevoix',
#sub-regions
'34810' : 'La Malbaie area',
'34820' : 'Baie-Saint-Paul area',
'34900' : 'Reserve Faunique des Laurentides',
#sub-regions
'34910' : 'Apica Mountain area',
'34920' : 'Grands-Jardins Park area',
'34930' : 'l\'etape area',
'35100' : 'Rimouski - Mont-Joli',
#sub-regions
'35110' : 'Mont-Joli area',
'35120' : 'Le Bic - Rimouski area',
'35130' : 'Rimouski Wildlife Reserve area',
'35200' : 'Matane',
'35300' : 'Amqui-Vallee de la Matapedia',
#sub-regions
'35310' : 'Amqui area',
'35320' : 'Matapedia area',
'35400' : 'Sainte-Anne-des-Monts - Grande-Vallee',
#sub-regions
'35410' : 'Grande-Vallee area',
'35420' : 'Sainte-Anne-des-Monts area',
'35500' : 'Parc National de la Gaspesie - Murdochville',
#sub-regions
'35510' : 'Murdochville area',
'35520' : 'Mont-Albert area',
'35530' : 'Grande-Riviere - Cascapedia area',
'35600' : 'Restigouche-Bonaventure',
#sub-regions
'35610' : 'New-Richmond - Bonaventure area',
'35620' : 'Restigouche - Carleton area',
'35700' : 'New-Carlisle - Chandler',
#sub-regions
'35710' : 'Chandler area',
'35720' : 'New Carlisle - Port-Daniel area',
'35800' : 'Parc National Forillon - Gaspe - Perce',
#sub-regions
'35810' : 'Parc National Forillon - Gaspe area',
'35820' : 'Perce area',
'36100' : 'Baie-Comeau',
'36200' : 'Sept-Iles - Port-Cartier',
#sub-regions
'36210' : 'Baie-Trinite area',
'36220' : 'Sept-Iles - Port-Cartier area',
'36300' : 'Minganie',
'36400' : 'Natashquan',
'36500' : 'Chevery',
'36600' : 'Blanc-Sablon',
'36700' : 'Anticosti',
'36800' : 'Iles-de-la-Madeleine',
'37100' : 'Vallee du Richelieu-Saint-Hyacinthe',
#sub-regions
'37110' : 'Sorel - Yamaska area',
'37120' : 'Saint-Hyacinthe - Acton Vale area',
'37130' : 'Vercheres - Beloeil area',
'37140' : 'Saint-Jean - Marieville area',
'37150' : 'Lacolle area',
'37200' : 'Drummondville - Bois-Francs',
#sub-regions
'37210' : 'Becancour - Villeroy area',
'37220' : 'Victoriaville area',
'37230' : 'Nicolet area',
'37240' : 'Drummondville area',
'37300' : 'Eastern Townships',
#sub-regions
'37311' : 'Granby - Waterloo area',
'37312' : 'Brome-Missisquoi area',
'37321' : 'Richmond area',
'37322' : 'Weedon area',
'37323' : 'Thetford Mines area',
'37331' : 'Mont-Orford - Lake Memphremagog area',
'37332' : 'Sherbrooke area',
'37333' : 'Coaticook area',
'37334' : 'Lac-Megantic area',
'37335' : 'Cookshire area',
'37400' : 'Beauce',
#sub-regions
'37410' : 'Lac-Etchemin area',
'37420' : 'Sainte-Marie-de-Beauce area',
'37430' : 'Saint-Georges-de-Beauce area',
'37500' : 'Montmagny - L\'Islet',
#sub-regions
'37510' : 'Montmagny - Saint-Jean-Port-Joli area',
'37520' : 'Saint-Fabien - Saint-Pamphile area',
'37600' : 'Kamouraska - Riviere-du-Loup - Trois-Pistoles',
#sub-regions
'37610' : 'Trois-Pistoles area',
'37620' : 'Pohenegamook area',
'37630' : 'Kamouraska area',
'37640' : 'Riviere-du-Loup area',
'37700' : 'Temiscouata',
'38100' : 'Ivujivik',
'38200' : 'Akulivik',
'38300' : 'Puvirnituq',
'38400' : 'Inukjuak',
'38500' : 'Umiujaq',
'38600' : 'Kuujjuarapik',
'39100' : 'Salluit - Raglan Lake',
'39200' : 'Kangiqsujuaq',
'39300' : 'Quaqtaq',
'39400' : 'Kangirsuk',
'39500' : 'Aupaluk',
'39600' : 'Tasiujaq',
'39700' : 'Kuujjuaq',
'39800' : 'Kangiqsualujjuaq',
#Ontario
'41100' : 'Simcoe - Delhi - Norfolk',
'41200' : 'Dunnville - Caledonia - Haldimand',
'41300' : 'Sarnia - Lambton',
#sub-regions
'41310' : 'Watford - Pinery Park - Eastern Lambton County',
'41320' : 'Sarnia - Petrolia - Western Lambton County',
'41400' : 'Windsor - Essex - Chatham-Kent',
#sub-regions
'41410' : 'Chatham-Kent - Rondeau Park',
'41420' : 'Windsor - Leamington - Essex County',
'41500' : 'London - Middlesex',
#sub-regions
'41510' : 'London - Parkhill - Eastern Middlesex County',
'41520' : 'Strathroy - Komoka - Western Middlesex County',
'41600' : 'Elgin',
#sub-regions
'41610' : 'St. Thomas - Aylmer - Eastern Elgin County',
'41620' : 'Rodney - Shedden - Western Elgin County',
'41700' : 'Oxford - Brant',
#sub-regions
'41710' : 'Woodstock - Tillsonburg - Oxford County',
'41720' : 'Brantford - County of Brant',
'42100' : 'Attawapiskat',
'42200' : 'Fort Severn',
'42300' : 'Peawanuck',
'42400' : 'Big Trout Lake - Sachigo Lake',
#sub-regions
'42410' : 'Sachigo Lake - Bearskin Lake',
'42420' : 'Big Trout Lake - Kasabonika',
'42500' : 'Sandy Lake - Pikangikum',
#sub-regions
'42510' : 'Sandy Lake - Weagamow Lake - Deer Lake',
'42520' : 'Pikangikum - Poplar Hill - MacDowell',
'42600' : 'Pickle Lake - Wunnummin Lake',
#sub-regions
'42610' : 'Summer Beaver - Wunnummin Lake - Kingfisher Lake',
'42620' : 'Pickle Lake - Cat Lake',
'42700' : 'Fort Hope - Webequie',
#sub-regions
'42710' : 'Webequie',
'42720' : 'Fort Hope - Lansdowne House - Ogoki',
'43100' : 'City of Toronto',
'43200' : 'York - Durham',
#sub-regions
'43210' : 'Uxbridge - Beaverton - Northern Durham Region',
'43220' : 'Newmarket - Georgina - Northern York Region',
'43230' : 'Pickering - Oshawa - Southern Durham Region',
'43240' : 'Vaughan - Richmond Hill - Markham',
'43300' : 'Belleville - Quinte - Northumberland',
#sub-regions
'43310' : 'Belleville - Quinte West - Eastern Northumberland County',
'43320' : 'Cobourg - Colborne - Western Northumberland County',
'43400' : 'Stirling - Tweed - South Frontenac',
#sub-regions
'43410' : 'Tamworth - Sydenham - South Frontenac',
'43420' : 'Stirling - Tweed - Madoc',
'43500' : 'Kingston - Prince Edward',
#sub-regions
'43510' : 'Kingston - Odessa - Frontenac Islands',
'43520' : 'Napanee - Consecon',
'43530' : 'Picton - Sandbanks Park',
'43600' : 'Brockville - Leeds and Grenville',
#sub-regions
'43610' : 'Merrickville-Wolford - Kemptville',
'43620' : 'Westport - Charleston Lake',
'43630' : 'Brockville - Prescott',
'43640' : 'Gananoque - Mallorytown',
'43700' : 'Cornwall - Morrisburg',
#sub-regions
'43710' : 'Maxville - Alexandria',
'43720' : 'Cornwall - Lancaster',
'43730' : 'Winchester - Newington',
'43740' : 'Morrisburg - Long Sault',
'44100' : 'Barrie - Orillia - Midland',
#sub-regions
'44110' : 'Midland - Coldwater - Orr Lake',
'44120' : 'Orillia - Lagoon City - Washago',
'44130' : 'Barrie - Collingwood - Hillsdale',
'44200' : 'Burk\'s Falls - Bayfield Inlet',
#sub-regions
'44210' : 'South River - Burk\'s Falls',
'44220' : 'Bayfield Inlet - Dunchurch',
'44300' : 'Algonquin',
#sub-regions
'44310' : 'Deep River - Whitney - Eastern Algonquin Park',
'44320' : 'Western Algonquin Park - Lake of Two Rivers',
'44400' : 'Parry Sound - Muskoka',
#sub-regions
'44410' : 'Huntsville - Baysville',
'44420' : 'Town of Parry Sound - Rosseau - Killbear Park',
'44430' : 'Port Carling - Port Severn',
'44440' : 'Bracebridge - Gravenhurst',
'44500' : 'Haliburton',
#sub-regions
'44510' : 'Oxtongue Lake - Fort Irwin - Northern Haliburton County',
'44520' : 'Haliburton - Minden - Southern Haliburton County',
'44600' : 'Bancroft - Bon Echo Park',
#sub-regions
'44610' : 'Bancroft - Hastings Highlands - Denbigh',
'44620' : 'Kaladar - Bannockburn - Bon Echo Park',
'44700' : 'Peterborough - Kawartha Lakes',
#sub-regions
'44710' : 'Fenelon Falls - Balsam Lake Park - Northern Kawartha Lakes',
'44720' : 'Apsley - Woodview - Northern Peterborough County',
'44730' : 'Lindsay - Southern Kawartha Lakes',
'44740' : 'Peterborough City - Lakefield - Southern Peterborough County',
'45100' : 'Prescott and Russell',
'45200' : 'Renfrew - Pembroke - Barry\'s Bay',
#sub-regions
'45210' : 'Petawawa - Pembroke - Cobden',
'45220' : 'Barry\'s Bay - Killaloe',
'45230' : 'Renfrew - Arnprior - Calabogie',
'45300' : 'Smiths Falls - Lanark - Sharbot Lake',
#sub-regions
'45310' : 'Smiths Falls - Perth - Eastern Lanark County',
'45320' : 'Plevna - Sharbot Lake - Western Lanark County',
'45400' : 'City of Ottawa',
#sub-regions
'45410' : 'Ottawa North - Kanata - Orleans',
'45420' : 'Ottawa South - Richmond - Metcalfe',
'46100' : 'City of Hamilton',
'46200' : 'Grey - Bruce',
#sub-regions
'46210' : 'Bruce Peninsula - Sauble Beach - Tobermory',
'46220' : 'Owen Sound - Blue Mountains - Northern Grey County',
'46230' : 'Saugeen Shores - Kincardine - Southern Bruce County',
'46240' : 'Hanover - Dundalk - Southern Grey County',
'46300' : 'Huron - Perth',
#sub-regions
'46310' : 'Wingham - Blyth - Northern Huron County',
'46320' : 'Listowel - Milverton - Northern Perth County',
'46330' : 'Goderich - Bluewater - Southern Huron County',
'46340' : 'Stratford - Mitchell - Southern Perth County',
'46400' : 'Waterloo - Wellington',
#sub-regions
'46410' : 'Mount Forest - Arthur - Northern Wellington County',
'46420' : 'Guelph - Erin - Southern Wellington County',
'46430' : 'Kitchener - Cambridge - Region of Waterloo',
'46500' : 'Dufferin - Innisfil',
#sub-regions
'46510' : 'Innisfil - New Tecumseth - Angus',
'46520' : 'Shelburne - Mansfield - Northern Dufferin County',
'46530' : 'Orangeville - Grand Valley - Southern Dufferin County',
'46600' : 'Halton - Peel',
#sub-regions
'46610' : 'Caledon',
'46620' : 'Mississauga - Brampton',
'46630' : 'Halton Hills - Milton',
'46640' : 'Burlington - Oakville',
'46700' : 'Niagara',
#sub-regions
'46710' : 'St. Catharines - Grimsby - Northern Niagara Region',
'46720' : 'Niagara Falls - Welland - Southern Niagara Region',
'47100' : 'Red Lake - Ear Falls',
#sub-regions
'47110' : 'Red Lake - Woodland Caribou Park',
'47120' : 'Ear Falls - Perrault Falls - Western Lac Seul',
'47200' : 'Sioux Lookout - Savant Lake',
#sub-regions
'47210' : 'Savant Lake - Sturgeon Lake',
'47220' : 'Sioux Lookout - Eastern Lac Seul',
'47300' : 'Kenora - Nestor Falls',
#sub-regions
'47310' : 'Kenora - Grassy Narrows - Whitedog',
'47320' : 'Sioux Narrows - Nestor Falls - Morson',
'47400' : 'Dryden - Ignace',
#sub-regions
'47410' : 'Dryden - Vermilion Bay',
'47420' : 'Ignace - English River',
'47500' : 'Fort Frances - Rainy Lake',
#sub-regions
'47510' : 'Fort Frances - Emo - Rainy River',
'47520' : 'Seine River Village - Mine Centre',
'48100' : 'City of Thunder Bay',
'48200' : 'Lake Nipigon - Wabakimi',
#sub-regions
'48210' : 'Armstrong - Auden - Wabakimi Park',
'48220' : 'Beardmore - Jellicoe - Macdiarmid',
'48230' : 'Gull Bay - Black Sturgeon Lake',
'48300' : 'Geraldton - Manitouwadge - Hornepayne',
#sub-regions
'48310' : 'Nakina - Aroland - Pagwa',
'48320' : 'Geraldton - Longlac - Caramat',
'48330' : 'Manitouwadge - Hornepayne',
'48400' : 'Atikokan - Upsala - Quetico',
#sub-regions
'48410' : 'Upsala - Raith',
'48420' : 'Atikokan - Shebandowan - Quetico Park',
'48500' : 'Superior West',
#sub-regions
'48510' : 'Cloud Bay - Dorion',
'48520' : 'Kakabeka Falls - Whitefish Lake - Arrow Lake',
'48600' : 'Nipigon - Marathon - Superior North',
#sub-regions
'48610' : 'Nipigon - Rossport',
'48620' : 'Marathon - Schreiber',
'48700' : 'Wawa - White River - Pukaskwa',
#sub-regions
'48710' : 'White River - Dubreuilville',
'48720' : 'Wawa - Pukaskwa Park',
'48800' : 'Sault Ste. Marie - Superior East',
#sub-regions
'48810' : 'Agawa - Lake Superior Park',
'48820' : 'Searchmont - Montreal River Harbour - Batchawana Bay',
'48830' : 'Sault Ste. Marie - St. Joseph Island',
'49100' : 'Greater Sudbury and vicinity',
'49200' : 'Elliot Lake - Ranger Lake',
'49300' : 'Moosonee - Fort Albany',
#sub-regions
'49310' : 'Fort Albany',
'49320' : 'Moosonee',
'49400' : 'Kapuskasing - Hearst',
#sub-regions
'49410' : 'Fraserdale - Pledger Lake',
'49420' : 'Kapuskasing - Hearst - Smooth Rock Falls',
'49500' : 'Timmins - Cochrane',
#sub-regions
'49510' : 'Little Abitibi - Kesagami Lake',
'49520' : 'Timmins - Cochrane - Iroquois Falls',
'49600' : 'Chapleau - Gogama',
#sub-regions
'49610' : 'Chapleau - Missinaibi Lake',
'49620' : 'Gogama - Foleyet',
'49700' : 'Kirkland Lake - New Liskeard - Temagami',
#sub-regions
'49710' : 'Kirkland Lake - Englehart',
'49720' : 'New Liskeard - Temagami',
'49800' : 'North Bay - West Nipissing',
#sub-regions
'49810' : 'West Nipissing - French River',
'49820' : 'North Bay - Powassan - Mattawa',
'49900' : 'Manitoulin - Northshore - Killarney',
#sub-regions
'49910' : 'Blind River - Thessalon',
'49920' : 'Espanola - Killarney',
'49930' : 'Manitoulin Island',
#Manitoba
'51100' : 'Virden - Souris',
#sub-regions
'51110' : 'R.M. of Wallace',
'51120' : 'R.M. of Woodworth',
'51130' : 'R.M. of Daly',
'51140' : 'R.M. of Whitehead',
'51150' : 'R.M. of Sifton',
'51160' : 'R.M. of Pipestone',
'51170' : 'R.M. of Glenwood',
'51200' : 'Brandon - Carberry - Treherne',
#sub-regions
'51211' : 'R.M. of Elton',
'51212' : 'R.M. of Cornwallis',
'51213' : 'R.M. of Oakland',
'51220' : 'City of Brandon',
'51231' : 'R.M. of North Cypress',
'51232' : 'R.M. of South Cypress',
'51241' : 'R.M. of North Norfolk',
'51242' : 'R.M. of South Norfolk',
'51243' : 'R.M. of Victoria',
'51300' : 'Melita - Boissevain - Turtle Mountain Provincial Park',
#sub-regions
'51311' : 'R.M. of Albert',
'51312' : 'R.M. of Cameron',
'51313' : 'R.M. of Whitewater',
'51321' : 'R.M. of Edward',
'51322' : 'R.M. of Arthur',
'51331' : 'R.M. of Brenda',
'51332' : 'R.M. of Winchester',
'51340' : 'R.M. of Morton',
'51400' : 'Killarney - Pilot Mound - Manitou',
#sub-regions
'51411' : 'R.M. of Riverside',
'51412' : 'R.M. of Turtle Mountain',
'51421' : 'R.M. of Strathcona',
'51422' : 'R.M. of Argyle',
'51431' : 'R.M. of Roblin',
'51432' : 'R.M. of Louise',
'51441' : 'R.M. of Lorne',
'51442' : 'R.M. of Pembina',
'52100' : 'City of Winnipeg',
'52200' : 'Selkirk - Gimli - Stonewall - Woodlands - Eriksdale',
#sub-regions
'52211' : 'R.M. of Eriksdale',
'52212' : 'R.M. of Coldwell',
'52221' : 'R.M. of Armstrong (north)',
'52222' : 'R.M. of Armstrong (south)',
'52223' : 'R.M. of Gimli',
'52231' : 'R.M. of St. Laurent',
'52232' : 'R.M. of Woodlands',
'52241' : 'R.M. of Rockwood (Teulon)',
'52242' : 'R.M. of St. Andrews (Dunnottar)',
'52243' : 'R.M. of Rockwood (Stonewall)',
'52244' : 'R.M. of St. Andrews (St. Andrews)',
'52251' : 'R.M. of Rosser',
'52252' : 'R.M. of West St. Paul',
'52300' : 'Portage la Prairie - Headingley - Brunkild - Carman',
#sub-regions
'52311' : 'R.M. of Portage la Prairie (St. Ambroise)',
'52312' : 'R.M. of Portage la Prairie (Portage la Prairie)',
'52321' : 'R.M. of St Francois Xavier',
'52322' : 'R.M. of Cartier',
'52323' : 'R.M. of Headingley',
'52331' : 'R.M. of Grey',
'52332' : 'R.M. of Dufferin',
'52340' : 'R.M. of MacDonald',
'52400' : 'Dugald - Beausejour - Grand Beach',
#sub-regions
'52411' : 'R.M. of Alexander',
'52412' : 'R.M. of St. Clements (Grand Beach and Brokenhead First Nation)',
'52421' : 'R.M. of St. Clements (Libau)',
'52422' : 'R.M. of Brokenhead',
'52431' : 'R.M. of Springfield',
'52432' : 'R.M. of Victoria Beach',
'52434' : 'R.M. of East St. Paul',
'52500' : 'Morden - Winkler - Altona - Emerson - Morris',
#sub-regions
'52510' : 'R.M. of Morris',
'52520' : 'R.M. of Thompson',
'52530' : 'R.M. of Roland',
'52540' : 'R.M. of Montcalm',
'52550' : 'R.M. of Rhineland',
'52560' : 'R.M. of Stanley',
'52600' : 'Steinbach - St. Adolphe - Dominion City - Vita - Richer',
#sub-regions
'52611' : 'R.M. of Ritchot',
'52612' : 'R.M. of Tache',
'52620' : 'R.M. of De Salaberry',
'52630' : 'R.M. of Hanover',
'52641' : 'R.M. of Ste. Anne',
'52642' : 'R.M. of La Broquerie',
'52651' : 'R.M. of Franklin',
'52652' : 'R.M. of Stuartburn',
'53100' : 'Bissett - Nopiming Provincial Park - Pine Falls',
#sub-regions
'53110' : 'Manigotagan, Black River First Nation and Hollow Water First Nation',
'53120' : 'Bissett',
'53130' : 'Nopiming Provincial Park',
'53140' : 'R.M. of Alexander (Pine Falls and Great Falls)',
'53150' : 'R.M. of Alexander west of Great Falls',
'53200' : 'Whiteshell - Lac Du Bonnet - Pinawa',
#sub-regions
'53211' : 'R.M. of Lac Du Bonnet',
'53212' : 'R.M. of Reynolds',
'53221' : 'L.G.D. of Pinawa',
'53222' : 'R.M. of Whitemouth',
'53231' : 'Point du Bois',
'53232' : 'Falcon Lake and West Hawk Lake',
'53241' : 'R.M. of Reynolds north of Highway 1',
'53242' : 'R.M. of Reynolds south of Highway 1',
'53243' : 'Shoal Lake First Nation',
'53300' : 'Sprague - Northwest Angle Provincial Forest',
#sub-regions
'53310' : 'Buffalo Point First Nation and Northwest Angle Provincial Forest',
'53320' : 'R.M. of Piney (west)',
'53330' : 'R.M. of Piney (east)',
'54100' : 'Grand Rapids',
#sub-regions
'54110' : 'Grand Rapids and Easterville',
'54120' : 'Waterhen, Meadow Portage and Skownan',
'54200' : 'Arborg - Hecla - Fisher River - Gypsumville - Ashern',
#sub-regions
'54211' : 'R.M. of Grahamdale (north)',
'54212' : 'R.M. of Grahamdale (central)',
'54221' : 'Jackhead First Nation',
'54222' : 'R.M. of Fisher (north)',
'54231' : 'R.M. of Siglunes',
'54232' : 'R.M. of Grahamdale (south)',
'54241' : 'R.M. of Fisher (south)',
'54242' : 'R.M. of Bifrost',
'54243' : 'Moose Creek Provincial Forest, Pine Dock and Matheson Island',
'55100' : 'Dauphin - Roblin - Winnipegosis',
#sub-regions
'55111' : 'R.M. of Park (north)',
'55112' : 'R.M. of Shell River',
'55113' : 'R.M. of Hillsburg',
'55114' : 'R.M. of Shellmouth',
'55115' : 'R.M. of Boulton',
'55121' : 'R.M. of Dauphin',
'55122' : 'R.M. of Ethelbert',
'55123' : 'R.M. of Gilbert Plains',
'55131' : 'R.M. of Mossey River',
'55132' : 'R.M. of Grandview',
'55133' : 'R.M. of Ochre River',
'55200' : 'Minnedosa - Neepawa - Russell - Riding Mountain National Park',
#sub-regions
'55211' : 'R.M. of Russell',
'55212' : 'R.M. of Silver Creek',
'55221' : 'R.M. of Rossburn',
'55222' : 'Riding Mountain National Park',
'55223' : 'R.M. of Park (south)',
'55231' : 'R.M. of Ellice',
'55232' : 'R.M. of Birtle',
'55233' : 'R.M. of Archie',
'55234' : 'R.M. of Miniota',
'55241' : 'R.M. of Shoal Lake',
'55242' : 'R.M. of Strathclair',
'55243' : 'R.M. of Hamiota',
'55244' : 'R.M. of Blanshard',
'55251' : 'R.M. of Harrison',
'55252' : 'R.M. of Saskatchewan',
'55253' : 'R.M. of Clanwilliam',
'55254' : 'R.M. of Minto',
'55255' : 'R.M. of Odanah',
'55261' : 'R.M. of Langford',
'55262' : 'R.M. of Rosedale',
'55300' : 'Ste. Rose - McCreary - Alonsa - Gladstone',
#sub-regions
'55311' : 'R.M. of Lawrence',
'55312' : 'R.M. of Alonsa (north)',
'55321' : 'R.M. of Ste. Rose',
'55322' : 'R.M. of Alonsa (central)',
'55331' : 'R.M. of McCreary',
'55332' : 'R.M. of Alonsa (south)',
'55340' : 'R.M. of Glenella',
'55351' : 'R.M. of Lansdowne',
'55352' : 'R.M. of Westbourne',
'55353' : 'R.M. of Lakeview',
'56100' : 'The Pas - Wanless - Westray - Clearwater Lake Provincial Park',
#sub-regions
'56110' : 'The Pas, Cormorant, Westray and Wanless',
'56120' : 'North and South Moose Lakes',
'56200' : 'Swan River - Duck Mountain Provincial Park - Porcupine Provincial Forest',
#sub-regions
'56211' : 'Westgate, Red Deer Lake and Barrows',
'56212' : 'Porcupine Provincial Forest',
'56221' : 'R.M. of Mountain (northwest)',
'56222' : 'Pelican Rapids and Shoal River First Nations',
'56231' : 'R.M. of Swan River',
'56232' : 'R.M. of Minitonas',
'56241' : 'Duck Mountain Provincial Park and Forest',
'56242' : 'R.M. of Mountain (southeast)',
'57100' : 'Norway House - Cross Lake - Wabowden',
#sub-regions
'57110' : 'Molson Lake and Bear Lake',
'57120' : 'Cross Lake and Jenpeg',
'57130' : 'Wabowden',
'57140' : 'Highway 6 between Little Limestone Lake and Ponton',
'57150' : 'Norway House',
'57200' : 'Poplar River',
'57300' : 'Berens River - Little Grand Rapids - Bloodvein - Atikaki',
#sub-regions
'57310' : 'Berens River',
'57320' : 'Little Grand Rapids and Atikaki Provincial Park',
'57330' : 'Bloodvein',
'58100' : 'Brochet',
'58200' : 'Tadoule Lake',
'58300' : 'Lynn Lake - Leaf Rapids - Pukatawagan',
#sub-regions
'58310' : 'Lynn Lake',
'58320' : 'Leaf Rapids',
'58330' : 'Pukatawagan',
'58400' : 'Thompson - Nelson House - Split Lake',
#sub-regions
'58410' : 'South Indian Lake and Amisk Provincial Natural Park',
'58420' : 'Split Lake and York Landing',
'58430' : 'Thompson, Thicket Portage and Pikwitonei',
'58440' : 'Nelson House',
'58500' : 'Flin Flon - Cranberry Portage - Snow Lake',
#sub-regions
'58510' : 'Snow Lake and Herb Lake Landing',
'58520' : 'Flin Flon, Cranberry Portage and Grass River Provincial Park',
'59100' : 'Churchill',
'59200' : 'York',
'59300' : 'Gillam',
'59400' : 'Shamattawa',
'59500' : 'Island Lake - Oxford House - Gods Lake',
#sub-regions
'59510' : 'Oxford House and Gods Lake',
'59520' : 'Red Sucker Lake',
'59530' : 'Island Lake',
#Saskatchewan
'61100' : 'Leader - Gull Lake',
#sub-regions
'61111' : 'R.M. of Deer Forks',
'61112' : 'R.M. of Happyland',
'61113' : 'R.M. of Enterprise',
'61114' : 'R.M. of Fox Valley',
'61115' : 'R.M. of Big Stick',
'61121' : 'R.M. of Clinworth',
'61122' : 'R.M. of Miry Creek',
'61123' : 'R.M. of Pittville',
'61131' : 'R.M. of Piapot',
'61132' : 'R.M. of Gull Lake',
'61133' : 'R.M. of Carmichael',
'61200' : 'Swift Current - Herbert - Cabri - Kyle - Lucky Lake',
#sub-regions
'61211' : 'R.M. of Lacadena',
'61212' : 'R.M. of Victory',
'61213' : 'R.M. of Canaan',
'61221' : 'R.M. of Riverside',
'61222' : 'R.M. of Saskatchewan Landing',
'61223' : 'R.M. of Webb',
'61224' : 'R.M. of Swift Current',
'61225' : 'R.M. of Lac Pelletier',
'61231' : 'R.M. of Excelsior',
'61232' : 'R.M. of Morse',
'61233' : 'R.M. of Coulee',
'61234' : 'R.M. of Lawtonia',
'61235' : 'R.M. of Glen Bain',
'61236' : 'R.M. of Whiska Creek',
'61300' : 'Shaunavon - Maple Creek - Val Marie - Cypress Hills',
#sub-regions
'61311' : 'R.M. of Maple Creek',
'61312' : 'Cypress Hills Provincial Park',
'61313' : 'R.M. of Reno',
'61321' : 'R.M. of Arlington',
'61322' : 'R.M. of White Valley',
'61323' : 'R.M. of Frontier',
'61331' : 'R.M. of Bone Creek',
'61332' : 'R.M. of Grassy Creek',
'61333' : 'R.M. of Wise Creek',
'61341' : 'R.M. of Lone Tree',
'61342' : 'R.M. of Val Marie',
'61351' : 'R.M. of Auvergne',
'61352' : 'R.M. of Pinto Creek',
'61353' : 'R.M. of Glen McPherson',
'61354' : 'R.M. of Mankota',
'62100' : 'City of Regina',
'62200' : 'Moose Jaw - Pense - Central Butte - Craik',
#sub-regions
'62211' : 'R.M. of Maple Bush',
'62212' : 'R.M. of Huron',
'62213' : 'R.M. of Enfield',
'62214' : 'R.M. of Eyebrow',
'62221' : 'R.M. of Craik',
'62222' : 'R.M. of Sarnia',
'62223' : 'R.M. of Marquis',
'62224' : 'R.M. of Dufferin',
'62231' : 'R.M. of Chaplin',
'62232' : 'R.M. of Wheatlands',
'62233' : 'R.M. of Shamrock',
'62234' : 'R.M. of Rodgers',
'62241' : 'R.M. of Caron',
'62242' : 'R.M. of Moose Jaw',
'62243' : 'R.M. of Pense',
'62244' : 'City of Moose Jaw',
'62245' : 'R.M. of Hillsborough',
'62246' : 'R.M. of Redburn',
'62247' : 'R.M. of Baildon',
'62300' : 'Fort Qu\'Appelle - Indian Head - Lumsden - Pilot Butte',
#sub-regions
'62311' : 'R.M. of McKillop',
'62312' : 'R.M. of Longlaketon',
'62313' : 'R.M. of Lumsden',
'62321' : 'R.M. of Cupar',
'62322' : 'R.M. of Lipton',
'62323' : 'R.M. of Tullymet',
'62331' : 'R.M. of Sherwood',
'62332' : 'R.M. of Bratt\'s Lake',
'62341' : 'R.M. of Lajord',
'62342' : 'R.M. of Francis',
'62343' : 'R.M. of Montmartre and Assiniboine First Nation',
'62351' : 'R.M. of Edenwold and Piapot First Nation',
'62352' : 'R.M. of North Qu\'Appelle',
'62353' : 'R.M. of South Qu\'Appelle',
'62361' : 'R.M. of Abernethy',
'62362' : 'R.M. of Indian Head',
'62400' : 'Assiniboia - Gravelbourg - Coronach',
#sub-regions
'62411' : 'R.M. of Gravelbourg',
'62412' : 'R.M. of Sutton',
'62413' : 'R.M. of Wood River',
'62414' : 'R.M. of Stonehenge',
'62421' : 'R.M. of Waverley',
'62422' : 'R.M. of Old Post',
'62431' : 'R.M. of Lake Johnson',
'62432' : 'R.M. of Terrell',
'62433' : 'R.M. of Lake of the Rivers',
'62434' : 'R.M. of Excel',
'62441' : 'R.M. of Willow Bunch',
'62442' : 'R.M. of Poplar Valley',
'62443' : 'R.M. of Hart Butte',
'62451' : 'R.M. of Elmsthorpe',
'62452' : 'R.M. of Key West',
'62461' : 'R.M. of Bengough',
'62462' : 'R.M. of The Gap',
'62463' : 'R.M. of Happy Valley',
'62464' : 'R.M. of Surprise Valley',
'62500' : 'Estevan - Weyburn - Radville - Milestone',
#sub-regions
'62511' : 'R.M. of Caledonia',
'62512' : 'R.M. of Scott',
'62513' : 'R.M. of Norton',
'62514' : 'R.M. of Brokenshell',
'62521' : 'R.M. of Wellington',
'62522' : 'R.M. of Fillmore',
'62523' : 'R.M. of Weyburn',
'62524' : 'R.M. of Griffin',
'62531' : 'R.M. of Laurier',
'62532' : 'R.M. of Lomond',
'62533' : 'R.M. of Lake Alma',
'62534' : 'R.M. of Souris Valley',
'62541' : 'R.M. of Cymri',
'62542' : 'R.M. of Benson',
'62543' : 'R.M. of Cambria',
'62544' : 'R.M. of Estevan',
'63100' : 'Yorkton - Melville - Esterhazy',
#sub-regions
'63111' : 'R.M. of Garry',
'63112' : 'R.M. of Orkney',
'63113' : 'R.M. of Stanley',
'63114' : 'R.M. of Cana',
'63121' : 'R.M. of McLeod',
'63122' : 'R.M. of Grayson',
'63131' : 'R.M. of Wallace',
'63132' : 'R.M. of Calder',
'63133' : 'R.M. of Saltcoats',
'63134' : 'R.M. of Churchbridge',
'63141' : 'R.M. of Fertile Belt',
'63142' : 'R.M. of Langenburg',
'63143' : 'R.M. of Spy Hill',
'63200' : 'Moosomin - Grenfell - Kipling - Wawota',
#sub-regions
'63211' : 'R.M. of Wolseley',
'63212' : 'R.M. of Elcapo and Cowessess First Nation',
'63221' : 'R.M. of Chester',
'63222' : 'R.M. of Hazelwood',
'63223' : 'R.M. of Golden West and Ocean Man First Nation',
'63224' : 'R.M. of Kingsley',
'63231' : 'R.M. of Silverwood',
'63232' : 'R.M. of Wawken',
'63241' : 'R.M. of Martin',
'63242' : 'R.M. of Moosomin',
'63243' : 'R.M. of Walpole',
'63244' : 'R.M. of Maryfield',
'63251' : 'R.M. of Willowdale and Ochapowace First Nation',
'63252' : 'R.M. of Rocanville',
'63300' : 'Carlyle - Oxbow - Carnduff - Bienfait - Stoughton',
#sub-regions
'63311' : 'R.M. of Tecumseh',
'63312' : 'R.M. of Brock',
'63321' : 'R.M. of Browning',
'63322' : 'R.M. of Coalfields',
'63331' : 'R.M. of Moose Creek',
'63332' : 'R.M. of Enniskillen',
'63341' : 'R.M. of Moose Mountain',
'63342' : 'R.M. of Antler',
'63351' : 'R.M. of Storthoaks',
'63352' : 'R.M. of Reciprocity',
'63353' : 'R.M. of Mount Pleasant',
'63354' : 'R.M. of Argyle',
'64100' : 'Hudson Bay - Porcupine Plain',
#sub-regions
'64110' : 'R.M. of Hudson Bay including Shoal Lake and Red Earth First Nations',
'64120' : 'R.M. of Hudson Bay including Hudson Bay proper and Reserve',
'64130' : 'R.M. of Porcupine',
'64200' : 'Kamsack - Canora - Preeceville',
#sub-regions
'64211' : 'R.M. of Hazel Dell',
'64212' : 'R.M. of Preeceville',
'64213' : 'R.M. of Invermay',
'64214' : 'R.M. of Buchanan',
'64221' : 'R.M. of Insinger',
'64222' : 'R.M. of Good Lake',
'64231' : 'R.M. of Keys and The Key First Nation',
'64232' : 'R.M. of St. Philips',
'64233' : 'R.M. of Sliding Hills',
'64234' : 'R.M. of Cote',
'64241' : 'R.M. of Clayton',
'64242' : 'R.M. of Livingston',
'65100' : 'City of Saskatoon',
'65200' : 'Prince Albert - Shellbrook - Spiritwood - Duck Lake',
#sub-regions
'65211' : 'R.M. of Spiritwood',
'65212' : 'R.M. of Canwood and Big River First Nation',
'65221' : 'R.M. of Meeting Lake and Lucky Man First Nation',
'65222' : 'R.M. of Leask and Mistawasis First Nation',
'65230' : 'R.M. of Shellbrook and Sturgeon Lake First Nation',
'65240' : 'R.M. of Duck Lake and Beardy\'s First Nation',
'65251' : 'R.M. of Lakeland',
'65252' : 'R.M. of Paddockwood',
'65261' : 'R.M. of Buckland and Wahpeton First Nation',
'65262' : 'R.M. of Garden River',
'65263' : 'City of Prince Albert',
'65271' : 'R.M. of Prince Albert',
'65272' : 'R.M. of Birch Hills and Muskoday First Nation',
'65273' : 'R.M. of St. Louis and One Arrow First Nation',
'65300' : 'Melfort - Tisdale - Nipawin - Carrot River',
#sub-regions
'65310' : 'R.M. of Torch River',
'65321' : 'R.M. of Nipawin',
'65322' : 'R.M. of Moose Range',
'65331' : 'R.M. of Kinistino and James Smith First Nation',
'65332' : 'R.M. of Invergordon',
'65333' : 'R.M. of Flett\'s Springs',
'65334' : 'R.M. of Three Lakes',
'65335' : 'R.M. of Lake Lenore',
'65341' : 'R.M. of Willow Creek',
'65342' : 'R.M. of Connaught',
'65343' : 'R.M. of Star City',
'65344' : 'R.M. of Tisdale',
'65345' : 'R.M. of Pleasantdale',
'65346' : 'R.M. of Barrier Valley',
'65351' : 'R.M. of Arborfield',
'65352' : 'R.M. of Bjorkdale',
'65400' : 'Martensville - Warman - Rosthern - Delisle - Wakaw',
#sub-regions
'65411' : 'R.M. of Redberry',
'65412' : 'R.M. of Blaine Lake',
'65413' : 'R.M. of Great Bend',
'65421' : 'R.M. of Laird',
'65422' : 'R.M. of Rosthern',
'65431' : 'R.M. of Eagle Creek',
'65432' : 'R.M. of Corman Park',
'65433' : 'R.M. of Perdue',
'65434' : 'R.M. of Vanscoy',
'65441' : 'R.M. of Aberdeen',
'65442' : 'R.M. of Fish Creek',
'65443' : 'R.M. of Hoodoo',
'65444' : 'R.M. of Grant',
'65445' : 'R.M. of Bayne',
'65451' : 'R.M. of Colonsay',
'65452' : 'R.M. of Viscount',
'65453' : 'R.M. of Blucher',
'65500' : 'Outlook - Watrous - Hanley - Imperial - Dinsmore',
#sub-regions
'65511' : 'R.M. of Harris',
'65512' : 'R.M. of Montrose',
'65513' : 'R.M. of Milden',
'65514' : 'R.M. of Fertile Valley',
'65515' : 'R.M. of King George',
'65516' : 'R.M. of Coteau',
'65521' : 'R.M. of Dundurn',
'65522' : 'R.M. of Rudy',
'65523' : 'R.M. of Rosedale',
'65524' : 'R.M. of Loreburn',
'65531' : 'R.M. of Lost River',
'65532' : 'R.M. of Morris',
'65533' : 'R.M. of McCraney',
'65534' : 'R.M. of Wood Creek',
'65541' : 'R.M. of Arm River',
'65542' : 'R.M. of Willner',
'65543' : 'R.M. of Big Arm',
'65600' : 'Humboldt - Wynyard - Wadena - Lanigan - Foam Lake',
#sub-regions
'65611' : 'R.M. of Humboldt',
'65612' : 'R.M. of St. Peter',
'65613' : 'R.M. of Wolverine',
'65614' : 'R.M. of Leroy',
'65621' : 'R.M. of Spalding',
'65622' : 'R.M. of Ponass Lake',
'65623' : 'R.M. of Lakeside',
'65624' : 'R.M. of Lakeview',
'65631' : 'R.M. of Usborne',
'65632' : 'R.M. of Prairie Rose',
'65633' : 'R.M. of Wreford',
'65634' : 'R.M. of Mount Hope',
'65635' : 'R.M. of Last Mountain Valley',
'65641' : 'R.M. of Big Quill',
'65642' : 'R.M. of Elfros',
'65643' : 'R.M. of Kutawa and Poor Man First Nation',
'65644' : 'R.M. of Emerald',
'65645' : 'R.M. of Touchwood',
'65646' : 'R.M. of Kellross',
'65651' : 'R.M. of Kelvington and Yellowquill First Nation',
'65652' : 'R.M. of Sasman',
'65653' : 'R.M. of Foam Lake and Fishing Lake First Nation',
'65654' : 'R.M. of Ituna Bon Accord',
'66100' : 'Meadow Lake - Big River - Green Lake - Pierceland',
#sub-regions
'66110' : 'R.M. of Meadow Lake and Waterhen First Nation',
'66120' : 'R.M. of Beaver River',
'66130' : 'Green Lake',
'66140' : 'R.M. of Loon Lake',
'66150' : 'R.M. of Big River',
'66200' : 'The Battlefords - Unity - Maidstone - St. Walburg',
#sub-regions
'66211' : 'R.M. of Frenchman Butte',
'66212' : 'R.M. of Mervin',
'66213' : 'R.M. of Turtle River',
'66221' : 'R.M. of Britannia',
'66222' : 'R.M. of Wilton',
'66223' : 'R.M. of Eldon',
'66224' : 'R.M. of Paynton',
'66231' : 'R.M. of Manitou Lake',
'66232' : 'R.M. of Hillsdale',
'66233' : 'R.M. of Senlac',
'66234' : 'R.M. of Round Valley',
'66241' : 'R.M. of Cut Knife',
'66242' : 'R.M. of Battle River and Sweet Grass First Nation',
'66243' : 'R.M. of Buffalo',
'66244' : 'R.M. of Prairie, Red Pheasant and Mosquito First Nations',
'66251' : 'R.M. of Parkdale',
'66252' : 'R.M. of Medstead',
'66253' : 'R.M. of Meota',
'66254' : 'R.M. of Round Hill',
'66260' : 'The Battlefords',
'66271' : 'R.M. of North Battleford',
'66272' : 'R.M. of Douglas',
'66273' : 'R.M. of Mayfield',
'66280' : 'R.M. of Glenside',
'66300' : 'Kindersley - Rosetown - Biggar - Wilkie - Macklin',
#sub-regions
'66311' : 'R.M. of Eye Hill',
'66312' : 'R.M. of Grass Lake',
'66313' : 'R.M. of Heart\'s Hill',
'66314' : 'R.M. of Progress',
'66321' : 'R.M. of Tramping Lake',
'66322' : 'R.M. of Reform',
'66323' : 'R.M. of Mariposa',
'66324' : 'R.M. of Grandview',
'66331' : 'R.M. of Rosemount',
'66332' : 'R.M. of Biggar',
'66341' : 'R.M. of Antelope Park',
'66342' : 'R.M. of Prairiedale',
'66343' : 'R.M. of Milton',
'66351' : 'R.M. of Oakdale',
'66352' : 'R.M. of Winslow',
'66353' : 'R.M. of Kindersley',
'66361' : 'R.M. of Mountain View',
'66362' : 'R.M. of Marriott',
'66363' : 'R.M. of Pleasant Valley',
'66364' : 'R.M. of St. Andrews',
'66371' : 'R.M. of Chesterfield',
'66372' : 'R.M. of Newcombe',
'66381' : 'R.M. of Monet',
'66382' : 'R.M. of Snipe Lake',
'66400' : 'City of Lloydminster - SK',
'67100' : 'Ile a la Crosse - Buffalo Narrows - Beauval',
#sub-regions
'67110' : 'Buffalo Narrows and Peter Pond Lake',
'67120' : 'Ile a la Crosse and Beauval',
'67200' : 'La Ronge - Prince Albert National Park - Narrow Hills Provincial Park',
#sub-regions
'67210' : 'Highway 165 between Highway 2 and Route 914 including Pinehouse Lake',
'67220' : 'Lac La Ronge Provincial Park including La Ronge',
'67230' : 'Candle Lake and Narrow Hills Provincial Parks',
'67240' : 'Montreal Lake and Molanosa',
'67250' : 'Prince Albert National Park',
'67300' : 'Pelican Narrows - Cumberland House - Creighton',
#sub-regions
'67310' : 'Highway 135 including Pelican Narrows and Sandy Bay',
'67320' : 'Seabee Mine',
'67330' : 'Hanson Lake Road east of Hwy 135 including Creighton',
'67340' : 'Hanson Lake Road between Highways 165 and 135',
'67350' : 'Cumberland House',
'68100' : 'Uranium City - Camsell Portage',
'68200' : 'Fond du Lac - Stony Rapids',
#sub-regions
'68210' : 'Fond du Lac',
'68220' : 'Stony Rapids and Black Lake',
'68300' : 'La Loche - Clearwater River Provincial Park - Cluff Lake',
#sub-regions
'68310' : 'Cluff Lake Mine',
'68320' : 'La Loche and Clearwater River Provincial Park',
'68400' : 'Cree Lake - Key Lake',
'68500' : 'Wollaston Lake - Collins Bay',
'68600' : 'Southend - Brabant Lake - Kinoosao',
#Alberta
'71100' : 'Jasper National Park',
#sub-regions
'71110' : 'Jasper National Park near Pocahontas',
'71120' : 'Jasper National Park near Jasper',
'71130' : 'Jasper National Park near Columbia Icefield and Sunwapta Falls',
'71200' : 'Nordegg - Forestry Trunk Road Highway 734',
#sub-regions
'71210' : 'Yellowhead County near Cadomin and Robb',
'71220' : 'Clearwater County near Chungo Creek',
'71230' : 'Clearwater County near Nordegg and Big Horn First Nation',
'71240' : 'Clearwater County near Ya-Ha-Tinda Ranch',
'71300' : 'Rocky Mountain House - Caroline',
#sub-regions
'71310' : 'M.D. of Brazeau near Cynthia and Lodgepole',
'71320' : 'Yellowhead County near Wolf Lake and Dismal Creek',
'71330' : 'Clearwater County near Sunchild First Nation',
'71340' : 'M.D. of Brazeau near the Brazeau Dam',
'71350' : 'Clearwater County near Rocky Mountain House and Crimson Lake',
'71360' : 'Clearwater County near Caroline and James River Bridge',
'71400' : 'Banff National Park',
#sub-regions
'71410' : 'Clearwater County near Siffleur Wilderness area',
'71420' : 'Banff National Park near Saskatchewan River Crossing',
'71430' : 'Banff National Park near Lake Louise',
'71440' : 'Banff National Park near Banff',
'71500' : 'Kananaskis - Canmore',
#sub-regions
'71510' : 'M.D. of Bighorn near Ghost River Wilderness',
'71520' : 'M.D. of Bighorn near Canmore, Bow Valley Park and Ghost Lake',
'71530' : 'Northern Kananaskis Country near Peter Lougheed Provincial Park',
'71540' : 'Southern Kananaskis Country near Highwood and Cataract Creek',
'72100' : 'Red Deer - Ponoka - Innisfail - Stettler',
#sub-regions
'72111' : 'Lacombe County near Eckville',
'72112' : 'Lacombe County near Lacombe, Blackfalds and Gull Lake',
'72113' : 'Lacombe County near Clive and Alix',
'72121' : 'Red Deer County near Sylvan Lake',
'72122' : 'Red Deer County near Spruce View and Red Lodge Provincial Park',
'72123' : 'Red Deer County near Penhold, Innisfail and Bowden',
'72124' : 'Red Deer County near Pine Lake',
'72125' : 'Red Deer County near Elnora, Lousana and Delburne',
'72131' : 'County of Stettler near Stettler, Erskine and Rochon Sands',
'72132' : 'County of Stettler near Big Valley',
'72133' : 'County of Stettler near Donalda',
'72134' : 'County of Stettler near Gadsby',
'72135' : 'County of Stettler near Byemoor',
'72140' : 'City of Red Deer',
'72150' : 'County of Paintearth near Halkirk',
'72160' : 'Flagstaff County near Forestburg',
'72170' : 'County of Camrose near Bashaw',
'72180' : 'Ponoka County near Ponoka, Hobbema and the Samson First Nation',
'72200' : 'Airdrie - Cochrane - Olds - Sundre',
#sub-regions
'72210' : 'Mountain View County near Sundre',
'72220' : 'Mountain View County near Olds and Didsbury',
'72230' : 'Mountain View County near Cremona',
'72240' : 'Mountain View County near Carstairs',
'72250' : 'M.D. of Rocky View near Airdrie and Crossfield',
'72260' : 'M.D. of Rocky View near Bottrell',
'72270' : 'M.D. of Rocky View near Cochrane',
'72300' : 'Drumheller - Three Hills',
#sub-regions
'72311' : 'Kneehill County near Torrington',
'72312' : 'Kneehill County near Trochu and Dry Island Buffalo Jump Park',
'72313' : 'Kneehill County near Three Hills',
'72321' : 'Kneehill County near Linden and Acme',
'72322' : 'Kneehill County near Carbon',
'72331' : 'Starland County near Rumsey',
'72332' : 'Starland County near Morrin',
'72333' : 'Starland County near Delia',
'72341' : 'Wheatland County near Rockyland and Rosebud',
'72342' : 'Wheatland County near Highways 569 and 848',
'72350' : 'Town of Drumheller',
'72360' : 'M.D. of Rocky View near Irricana and Kathryn',
'72370' : 'Special Area 2 near Finnegan and Little Fish Lake Provincial Park',
'72400' : 'City of Calgary',
'72500' : 'Okotoks - High River - Claresholm',
#sub-regions
'72510' : 'M.D. of Rocky View near Sarcee First Nation',
'72521' : 'M.D. of Foothills near Priddis',
'72522' : 'M.D. of Foothills near Turner Valley',
'72523' : 'M.D. of Foothills near Longview',
'72531' : 'M.D. of Foothills near Okotoks',
'72532' : 'M.D. of Foothills near High River',
'72533' : 'M.D. of Foothills near Cayley',
'72541' : 'M.D. of Willow Creek near Nanton',
'72542' : 'M.D. of Willow Creek near Claresholm and Stavely',
'72600' : 'Brooks - Strathmore - Vulcan',
#sub-regions
'72610' : 'M.D. of Rocky View near Langdon and Dalemead',
'72620' : 'M.D. of Foothills near Blackie',
'72631' : 'Wheatland County near Strathmore, Carseland and Lyalta',
'72632' : 'Wheatland County near Standard',
'72633' : 'Wheatland County near Siksika First Nation and Gleichen',
'72634' : 'Wheatland County near Hussar',
'72641' : 'Vulcan County near Mossleigh and Arrowwood',
'72642' : 'Vulcan County near Vulcan and Ensign',
'72643' : 'Vulcan County near Champion',
'72644' : 'Vulcan County near Lomond',
'72645' : 'Vulcan County near Milo',
'72651' : 'Newell County near Bassano',
'72652' : 'Newell County near Gem',
'72653' : 'Newell County near Brooks and Rosemary',
'72654' : 'Newell County near Scandia and Bow City',
'72660' : 'M.D. of Taber near Enchant',
'73100' : 'Crowsnest Pass - Pincher Creek - Waterton Park',
#sub-regions
'73110' : 'M.D. of Ranchland including Chain Lakes Provincial Park',
'73120' : 'M.D. of Pincher Creek near Cowley',
'73130' : 'Peigan First Nation',
'73140' : 'Municipality of Crowsnest Pass',
'73150' : 'M.D. of Pincher Creek near Beauvais Lake Provincial Park',
'73160' : 'M.D. of Pincher Creek near Pincher Creek and Twin Butte',
'73170' : 'Waterton Lakes National Park',
'73200' : 'Cardston - Fort Macleod - Magrath',
#sub-regions
'73211' : 'M.D. of Willow Creek near Granum',
'73212' : 'M.D. of Willow Creek near Fort Macleod',
'73221' : 'Blood First Nation including Stand Off',
'73222' : 'Cardston County near Glenwood',
'73231' : 'Cardston County near Magrath and Spring Coulee',
'73232' : 'Cardston County near Del Bonita and Whiskey Gap',
'73241' : 'Cardston County near Mountain View and Police Outpost Provincial Park',
'73242' : 'Cardston County near Cardston and Carway',
'73300' : 'Lethbridge - Taber - Milk River',
#sub-regions
'73311' : 'Lethbridge County near Barons and Nobleford',
'73312' : 'Lethbridge County near Picture Butte and Turin',
'73313' : 'Lethbridge County near Coaldale',
'73314' : 'City of Lethbridge',
'73321' : 'M.D. of Taber near Vauxhall and Hayes',
'73322' : 'M.D. of Taber near Taber and Cranford',
'73323' : 'M.D. of Taber near Grassy Lake',
'73331' : 'Warner County near Raymond and New Dayton',
'73332' : 'Warner County near Wrentham',
'73341' : 'Warner County near Warner',
'73342' : 'Warner County near Milk River and Coutts',
'73343' : 'Warner County near Writing-On-Stone Provincial Park',
'73350' : 'County of Forty Mile near Skiff',
'74100' : 'Hanna - Coronation - Oyen',
#sub-regions
'74111' : 'County of Paintearth near Castor',
'74112' : 'County of Paintearth near Brownfield',
'74113' : 'County of Paintearth near Coronation',
'74121' : 'Special Area 2 near Scapa',
'74122' : 'Special Area 2 near Hanna and Richdale',
'74123' : 'Special Area 2 near Sunnynook',
'74124' : 'Special Area 2 near Cessford',
'74131' : 'Special Area 4 near Veteran and Consort',
'74132' : 'Special Area 4 near Hemaruka',
'74133' : 'Special Area 4 near Kirriemuir and Compeer',
'74141' : 'Special Area 3 near Youngstown',
'74142' : 'Special Area 3 near Big Stone',
'74143' : 'Special Area 3 near New Brigden',
'74144' : 'Special Area 3 near Cereal and Oyen',
'74145' : 'Special Area 3 near Sibbald',
'74150' : 'M.D. of Provost near Bodo',
'74160' : 'M.D. of Acadia including Acadia Valley',
'74200' : 'Medicine Hat - Bow Island - Suffield',
#sub-regions
'74211' : 'Cypress County near Tide Lake',
'74212' : 'Cypress County near Suffield',
'74221' : 'Special Area 2 near Dinosaur Provincial Park',
'74222' : 'Special Area 2 near Jenner',
'74223' : 'Special Area 2 near Buffalo',
'74224' : 'Special Area 2 near Bindloss and Empress',
'74231' : 'Newell County near Patricia',
'74232' : 'Newell County near Tilley and Rolling Hills',
'74241' : 'Cypress County near CFB Suffield',
'74242' : 'Cypress County near Redcliff',
'74251' : 'Cypress County near Seven Persons',
'74252' : 'Cypress County near Dunmore',
'74253' : 'Cypress County near Irvine and Walsh',
'74261' : 'Cypress County near Schuler',
'74262' : 'Cypress County near McNeill',
'74270' : 'City of Medicine Hat',
'74280' : 'County of Forty Mile near Bow Island and Whitla',
'74300' : 'Cypress Hills Provincial Park - Foremost',
#sub-regions
'74310' : 'County of Forty Mile near Foremost',
'74320' : 'County of Forty Mile near Etzikom',
'74330' : 'County of Forty Mile near Manyberries',
'74340' : 'Cypress County near Cypress Hills Provincial Park',
'74350' : 'County of Forty Mile near Aden',
'74360' : 'Cypress County near Onefour',
'75100' : 'Bonnyville - St. Paul - Cold Lake - Lac La Biche',
#sub-regions
'75111' : 'Lakeland County near Plamondon',
'75112' : 'Lakeland County near Imperial Mills, Heart Lake and Philomena',
'75113' : 'Lakeland County near Lac La Biche and Sir Winston Churchill Park',
'75114' : 'Lakeland County near Lakeland Provincial Park',
'75115' : 'Lakeland County near Rich Lake',
'75116' : 'Lakeland County near Cold Lake Air Weapons Range',
'75121' : 'Smoky Lake County near Kikino',
'75122' : 'Smoky Lake County near Vilna and Whitefish Lake First Nation',
'75131' : 'County of St. Paul near Ashmont',
'75132' : 'County of St. Paul near St. Paul',
'75133' : 'County of St. Paul near Elk Point',
'75134' : 'County of St. Paul near Riverview and Unipouheos First Nation',
'75141' : 'M.D. of Bonnyville near La Corey and Wolf Lake',
'75142' : 'M.D. of Bonnyville near Glendon and Moose Lake Provincial Park',
'75143' : 'M.D. of Bonnyville near Bonnyville and Ardmore',
'75144' : 'M.D. of Bonnyville near Cold Lake and Grand Centre',
'75145' : 'M.D. of Bonnyville near Beaverdam, Elizabeth and Cold Lake First Nation',
'75146' : 'M.D. of Bonnyville near Sputinow',
'75200' : 'Lloydminster - Wainwright - Vermilion - Provost',
#sub-regions
'75211' : 'County of Two Hills near Two Hills',
'75212' : 'County of Two Hills near Myrnam and Derwent',
'75221' : 'County of Minburn near Ranfurly and Innisfree',
'75222' : 'County of Minburn near Mannville and Minburn',
'75230' : 'Beaver County near Viking',
'75241' : 'Flagstaff County near Killam and Sedgewick',
'75242' : 'Flagstaff County near Lougheed and Hardisty',
'75243' : 'Flagstaff County near Alliance',
'75251' : 'County of Vermilion River near Vermilion',
'75252' : 'County of Vermilion River near Islay',
'75253' : 'County of Vermilion River near Clandonald and Dewberry',
'75254' : 'County of Vermilion River near Tulliby Lake',
'75255' : 'County of Vermilion River near Kitscoty and Marwayne',
'75256' : 'County of Vermilion River near Paradise Valley',
'75260' : 'City of Lloydminster - AB',
'75271' : 'M.D. of Wainwright near Irma',
'75272' : 'M.D. of Wainwright near Wainwright',
'75273' : 'M.D. of Wainwright near Edgerton',
'75274' : 'M.D. of Wainwright near Chauvin',
'75281' : 'M.D. of Provost near Hughenden',
'75282' : 'M.D. of Provost near Czar and Metiskow',
'75283' : 'M.D. of Provost near Provost',
'76100' : 'Westlock - Barrhead - Athabasca',
#sub-regions
'76110' : 'Woodlands County near Fort Assiniboine',
'76121' : 'County of Barrhead near Thunder Lake Provincial Park',
'76122' : 'County of Barrhead near Bloomsbury and Neerlandia',
'76123' : 'County of Barrhead near Barrhead and Lac La Nonne',
'76130' : 'M.D. of Lesser Slave River near Chisholm and Cross Lake Park',
'76141' : 'Westlock County near Jarvie',
'76142' : 'Westlock County near Westlock and Clyde',
'76151' : 'County of Athabasca near Athabasca and Island Lake',
'76152' : 'County of Athabasca near Rochester and Meanook',
'76153' : 'County of Athabasca near Grassland',
'76154' : 'County of Athabasca near Boyle and Caslan',
'76160' : 'County of Thorhild near Newbrook and Long Lake Provincial Park',
'76200' : 'Spruce Grove - Morinville - Mayerthorpe - Evansburg',
#sub-regions
'76210' : 'Yellowhead County near Evansburg, Wildwood and MacKay',
'76221' : 'Lac Ste. Anne County near Mayerthorpe and Sangudo',
'76222' : 'Lac Ste. Anne County near Cherhill and Glenevis',
'76223' : 'Lac Ste. Anne County near Onoway, Rich Valley and Lac Ste. Anne',
'76231' : 'Parkland County near Tomahawk and Entwistle',
'76232' : 'Parkland County near Wabamun Lake Provincial Park',
'76233' : 'Parkland County near Stony Plain and Spruce Grove',
'76241' : 'Sturgeon County near Calahoo, Villeneuve and Riviere Qui Barre',
'76242' : 'Sturgeon County near Legal and Mearns',
'76243' : 'Sturgeon County near Morinville',
'76300' : 'Fort Saskatchewan - Vegreville - Redwater - Smoky Lake',
#sub-regions
'76310' : 'County of Thorhild near Thorhild',
'76321' : 'Sturgeon County near Bon Accord and Gibbons',
'76322' : 'Sturgeon County near Redwater',
'76331' : 'Lamont County near Bruderheim and Lamont',
'76332' : 'Lamont County near Andrew',
'76333' : 'Lamont County near Mundare and Chipman',
'76340' : 'Smoky Lake County near Smoky Lake and Waskatenau',
'76350' : 'County of Two Hills near Willingdon',
'76360' : 'City of Fort Saskatchewan and Northern Strathcona County',
'76370' : 'Elk Island National Park',
'76380' : 'County of Minburn near Vegreville',
'76400' : 'City of Edmonton - St. Albert - Sherwood Park',
'76500' : 'Drayton Valley - Devon - Rimbey - Pigeon Lake',
#sub-regions
'76510' : 'Leduc County near Warburg and Thorsby',
'76520' : 'Leduc County near Devon and Calmar',
'76530' : 'M.D. of Brazeau near Drayton Valley and Breton',
'76540' : 'County of Wetaskiwin near Pigeon Lake',
'76550' : 'County of Wetaskiwin near Alder Flats and Winfield',
'76560' : 'Ponoka County near Rimbey, Bluffton and Hoadley',
'76570' : 'Ponoka County near Crestomere',
'76600' : 'Leduc - Camrose - Wetaskiwin - Tofield',
#sub-regions
'76611' : 'Leduc County near Leduc and Beaumont',
'76612' : 'Leduc County near New Sarepta',
'76620' : 'County of Wetaskiwin near Wetaskiwin, Millet and Gwynne',
'76630' : 'Strathcona County near Cooking Lake',
'76641' : 'Beaver County near Tofield',
'76642' : 'Beaver County near Ryley and Holden',
'76651' : 'County of Camrose near Hay Lakes and Miquelon Lake Provincial Park',
'76652' : 'County of Camrose near Camrose',
'76653' : 'County of Camrose near New Norway',
'76654' : 'County of Camrose near Bawlf',
'76660' : 'Flagstaff County near Daysland',
'77100' : 'Grande Prairie - Beaverlodge - Valleyview',
#sub-regions
'77111' : 'County of Grande Prairie near Beaverlodge, Hythe and Demmitt',
'77112' : 'County of Grande Prairie near Sexsmith and La Glace',
'77113' : 'County of Grande Prairie near Grande Prairie and Wembley',
'77121' : 'M.D. of Greenview near Amundson',
'77122' : 'M.D. of Greenview near DeBolt',
'77123' : 'M.D. of Greenview near Little Smoky',
'77124' : 'M.D. of Greenview near Young\'s Point and Sturgeon Lake First Nation',
'77125' : 'M.D. of Greenview near Valleyview',
'77200' : 'Hinton - Grande Cache',
#sub-regions
'77210' : 'M.D. of Greenview near Grande Cache',
'77220' : 'M.D. of Greenview near Kakwa Wildland Provincial Park',
'77230' : 'Yellowhead County near William A. Switzer Provincial Park',
'77240' : 'Willmore Wilderness Park',
'77250' : 'Yellowhead County near Hinton and Obed Lake Provincial Park',
'77300' : 'Slave Lake',
#sub-regions
'77311' : 'M.D. of Big Lakes near Joussard and East Prairie',
'77312' : 'M.D. of Big Lakes near Grouard Mission and Hilliards Bay Park',
'77313' : 'M.D. of Big Lakes near Faust and Kinuso',
'77321' : 'M.D. of Lesser Slave River near Slave Lake',
'77322' : 'M.D. of Lesser Slave River near Slave Lake Provincial Park',
'77323' : 'M.D. of Lesser Slave River near Smith',
'77330' : 'M.D. of Opportunity near Calling Lake',
'77340' : 'County of Athabasca near Wandering River',
'77400' : 'Whitecourt - Edson - Fox Creek - Swan Hills',
#sub-regions
'77410' : 'M.D. of Greenview near Fox Creek',
'77421' : 'Yellowhead County near Edson and Marlboro',
'77422' : 'Yellowhead County near Peers and Niton Junction',
'77431' : 'Woodlands County near Windfall Creek',
'77432' : 'Woodlands County near Carson-Pegasus Provincial Park',
'77433' : 'Woodlands County near Lone Pine',
'77434' : 'Woodlands County near Whitecourt and Blue Ridge',
'77440' : 'M.D. of Big Lakes near Swan Hills',
'78100' : 'High Level - Rainbow Lake - Fort Vermilion - Mackenzie Highway',
#sub-regions
'78111' : 'M.D. of Mackenzie near Bistcho Lake',
'78112' : 'M.D. of Mackenzie near Zama Lake, Chateh and Rainbow Lake',
'78113' : 'M.D. of Mackenzie near Indian Cabins and Steen River',
'78114' : 'M.D. of Mackenzie near Meander River',
'78115' : 'M.D. of Mackenzie near High Level',
'78120' : 'M.D. of Northern Lights near Paddle Prairie and Carcajou',
'78131' : 'M.D. of Mackenzie near the Caribou Mountains',
'78132' : 'M.D. of Mackenzie near Fort Vermilion and Child Lake First Nation',
'78133' : 'M.D. of Mackenzie near John D\'or Prairie and Fox Creek',
'78134' : 'M.D. of Mackenzie near Buffalo Head Prairie and La Crete',
'78135' : 'M.D. of Mackenzie near Tall Cree First Nation and Wadlin Lake',
'78200' : 'Peace River - Fairview - High Prairie - Manning',
#sub-regions
'78211' : 'M.D. of Clear Hills near Notikewin River',
'78212' : 'M.D. of Clear Hills near Cleardale and Worsley',
'78213' : 'M.D. of Clear Hills near Eureka River and Hines Creek',
'78221' : 'Saddle Hills County near Silver Valley and Bay Tree',
'78222' : 'Saddle Hills County near Moonshine Lake Provincial Park',
'78223' : 'Saddle Hills County near Woking',
'78231' : 'M.D. of Spirit River including Spirit River and Rycroft',
'78232' : 'M.D. of Fairview including Fairview, Whitelaw and Dunvegan Provincial Park',
'78233' : 'M.D. of Peace including Peace River and Grimshaw',
'78241' : 'M.D. of Northern Lights near Manning and Notikewin Provincial Park',
'78242' : 'M.D. of Northern Lights near Dixonville and Chinook Valley',
'78251' : 'Birch Hills County near Wanham and Peoria',
'78252' : 'Birch Hills County near Eaglesham',
'78261' : 'M.D. of East Peace near Keppler Creek',
'78262' : 'M.D. of East Peace near Nampa and Three Creeks',
'78270' : 'M.D. of Smoky River including McLennan, Falher and Girouxville',
'78280' : 'M.D. of Big Lakes near High Prairie and Winagami Lake Provincial Park',
'78300' : 'Wabasca - Peerless Lake - Gift Lake - Cadotte Lake',
#sub-regions
'78311' : 'M.D. of East Peace near Bison Lake',
'78312' : 'M.D. of East Peace near Woodland Cree First Nation and Little Buffalo',
'78313' : 'M.D. of East Peace near Utikoomak Lake First Nation',
'78320' : 'M.D. of Big Lakes near Peavine and Gift Lake settlements',
'78331' : 'M.D. of Opportunity near Peerless Lake and Trout Lake',
'78332' : 'M.D. of Opportunity near Red Earth Creek',
'78333' : 'M.D. of Opportunity near Wabasca-Desmarais and Sandy Lake',
'78334' : 'M.D. of Opportunity near Chipewyan Lake',
'79100' : 'Fort Chipewyan - Wood Buffalo National Park',
#sub-regions
'79111' : 'Wood Buffalo National Park near Buffalo River',
'79112' : 'Wood Buffalo National Park near Hay Camp',
'79113' : 'Wood Buffalo National Park near Garden Creek',
'79114' : 'Wood Buffalo National Park near Peace Point and Lake Claire',
'79121' : 'R.M. of Wood Buffalo near Namur River',
'79122' : 'R.M. of Wood Buffalo near Fort Hills',
'79123' : 'R.M. of Wood Buffalo near Old Fort and Chipewyan First Nation',
'79131' : 'R.M. of Wood Buffalo near Fitzgerald',
'79132' : 'R.M. of Wood Buffalo near Colin-Cornwall Lakes Wildland Park',
'79133' : 'R.M. of Wood Buffalo near Fort Chipewyan',
'79200' : 'Fort McMurray - Fort MacKay',
#sub-regions
'79210' : 'R.M. of Wood Buffalo near Fort Mackay',
'79220' : 'R.M. of Wood Buffalo near Fort McMurray',
'79230' : 'R.M. of Wood Buffalo near Anzac and Gregoire Lake Provincial Park',
'79240' : 'Lakeland County near Highway 63 and Crow Lake Provincial Park',
'79250' : 'R.M. of Wood Buffalo near Conklin and Chard',
#British Columbia
'81100' : 'Haida Gwaii',
'81200' : 'North Vancouver Island',
#sub-regions
'81210' : 'North- and west-facing coasts',
'81220' : 'East-facing coasts',
'81300' : 'East Vancouver Island',
'81400' : 'West Vancouver Island',
#sub-regions
'81410' : 'Sombrio Point and north',
'81420' : 'South of Sombrio Point',
'81500' : 'Inland Vancouver Island',
'81600' : 'Greater Victoria',
'82100' : 'Central Coast - Coastal sections',
'82200' : 'Central Coast - Inland sections',
'82300' : 'Sunshine Coast',
'82400' : 'Whistler',
'82500' : 'Howe Sound',
'82600' : 'Metro Vancouver',
'82700' : 'Fraser Valley',
#sub-regions
'82710' : 'West including Abbotsford',
'82720' : 'East including Chilliwack',
'82800' : 'Southern Gulf Islands',
'83100' : 'Fraser Canyon',
'83200' : 'South Thompson',
'83300' : 'Nicola',
'83400' : 'Similkameen',
'83500' : 'Okanagan Valley',
#sub-regions
'83510' : 'North including Vernon',
'83520' : 'Central including Kelowna',
'83530' : 'South including Penticton',
'83600' : 'Shuswap',
'84100' : 'Arrow and Slocan Lakes',
'84200' : 'Boundary',
'84300' : 'West Kootenay',
'84400' : 'Kootenay Lake',
'84500' : 'East Kootenay',
#sub-regions
'84510' : 'South including Cranbrook',
'84520' : 'North including Invermere',
'84600' : 'Elk Valley',
'85100' : 'North Thompson',
'85200' : 'North Columbia',
'85300' : 'Kinbasket',
'85400' : 'West Columbia',
'85500' : 'East Columbia',
'85600' : 'Yoho and Kootenay Parks',
'86100' : 'Chilcotin',
'86200' : 'Cariboo',
#sub-regions
'86210' : 'North including Quesnel',
'86220' : 'South including Williams Lake',
'86300' : '100 Mile',
'86400' : 'Yellowhead',
'87100' : 'Bulkley Valley - The Lakes',
#sub-regions
'87110' : 'Northwest including Smithers',
'87120' : 'Southeast including Burns Lake',
'87200' : 'Williston',
'87300' : 'BC Peace River',
#sub-regions
'87310' : 'South',
'87320' : 'North',
'87400' : 'Prince George',
'87500' : 'McGregor',
'88100' : 'Fort Nelson',
'88200' : 'Muncho Lake and Stone Mountain Provincial Parks',
'88300' : 'Watson Lake - BC',
'89100' : 'North Coast - Coastal sections',
#sub-regions
'89110' : 'Banks Island and north',
'89120' : 'South of Banks Island',
'89200' : 'North Coast - Inland sections',
#sub-regions
'89210' : 'North of Kitimat',
'89220' : 'Kitimat and south',
'89300' : 'Dease Lake',
'89400' : 'Cassiar Mountains - BC',
'89500' : 'Teslin - BC',
'89600' : 'Atlin',
'89700' : 'South Klondike Highway - Carcross to White Pass',
'89800' : 'Haines Road - Haines Junction to Pleasant Camp',
#Yukon
'91100' : 'Dawson',
'91200' : 'Mayo',
'91300' : 'Beaver Creek',
'91400' : 'Pelly - Carmacks',
'91500' : 'Kluane Lake',
'91600' : 'Haines Junction',
'91700' : 'Whitehorse',
'92100' : 'Teslin - YT',
'92200' : 'Cassiar Mountains - YT',
'92300' : 'Watson Lake - YT',
'92400' : 'Faro - Ross River',
'93100' : 'Dempster',
'93200' : 'Old Crow',
#Northwest Territories
'94210' : 'Wrigley Region',
'94220' : 'Fort Simpson Region including Jean Marie River',
'94230' : 'Fort Liard Region including Nahanni Butte - Trout Lake',
'94310' : 'North Slave Region including Wekweti - Wha Ti - Behchoko',
'94320' : 'Fort Providence Region including Kakisa - Chan Lake',
'94330' : 'Yellowknife Region',
'94510' : 'Hay River Region including Enterprise',
'94520' : 'Fort Resolution Region including Highway 6',
'94530' : 'Lutsel K\'e Region',
'94540' : 'Thebacha Region including Fort Smith - Salt River Reserve',
'95100' : 'Tuktoyaktuk - East Channel Region',
'95200' : 'Aklavik Region',
'95300' : 'Inuvik Region',
'95400' : 'South Delta Region including Fort McPherson - Tsiigehtchic',
'95610' : 'Fort Good Hope Region',
'95620' : 'Norman Wells - Tulita Region',
'95630' : 'Colville Lake',
'95640' : 'Deline',
'95800' : 'Paulatuk',
'96210' : 'Sachs Harbour',
'96310' : 'Ulukhaktok',
#Nunavut
'97110' : 'Cambridge Bay',
'97210' : 'Kugluktuk',
'97310' : 'Taloyoak',
'97410' : 'Gjoa Haven',
'97420' : 'Kugaaruk',
'97510' : 'Baker Lake',
'97610' : 'Arviat',
'97620' : 'Rankin Region including Whale Cove',
'97630' : 'Chesterfield Inlet',
'97710' : 'Hall Beach',
'97720' : 'Igloolik',
'97740' : 'Repulse Bay',
'97810' : 'Coral Harbour',
'97820' : 'Sanikiluaq',
'98110' : 'Arctic Bay',
'98120' : 'Pond Inlet',
'98130' : 'Clyde River',
'98210' : 'Qikiqtarjuaq',
'98220' : 'Pangnirtung',
'98230' : 'Iqaluit',
'98240' : 'Kimmirut',
'98250' : 'Cape Dorset',
'99110' : 'Resolute',
'99210' : 'Grise Fiord',
'XXXXX' : 'TEST',
}
SAME_LOCA={
'0' : None,
'1' : 'Northwest',
'2' : 'North Central',
'3' : 'Northeast',
'4' : 'West Central',
'5' : 'Central',
'6' : 'East Central',
'7' : 'Southwest',
'8' : 'South Central',
'9' : 'Southeast',
}
SAME_LOCB={
'0' : None,
'1' : 'MOUNTAIN/HIGH',
'2' : 'MOUNTAIN',
'3' : 'VALLEY',
}
# SAME_CTYB is a list of "B" class counties for geographic subdivisions. If this applies to your area, add your county code to this list, and modify SAME_LOCB as needed.
# A, C and D counties all use SAME_LOCA; a selection sketch follows the list.
SAME_CTYB=['SAME1', 'SAME2']
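
# Illustrative sketch (not part of the original decoder): selecting the
# subdivision wording for the leading P digit of a PSSCCC location code.
# The helper name and signature below are assumptions for demonstration only.
def subdivision_name(county_code, p_digit):
    """Return the subdivision wording for a county's P digit.

    "B" class counties (listed in SAME_CTYB) use the MOUNTAIN/VALLEY
    wording in SAME_LOCB; A, C and D counties use the compass wording
    in SAME_LOCA.
    """
    table = SAME_LOCB if county_code in SAME_CTYB else SAME_LOCA
    return table.get(p_digit)

# Example: subdivision_name('SAME1', '2') -> 'MOUNTAIN'
#          subdivision_name('12345', '7') -> 'Southwest'
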
SAME__ORG={
'' : None,
'EAS' : {'NAME' : {'US' : 'Broadcast station or cable system', 'CA' : 'Broadcast station or cable system'}, 'PLURAL' : False, 'ARTICLE' : {'US' :'A', 'CA' : 'A'}},
'CIV' : {'NAME' : {'US' : 'Civil authorities', 'CA' : 'Civil authorities'}, 'PLURAL' : True, 'ARTICLE' : {'US' :'THE', 'CA' : 'THE'}},
'WXR' : {'NAME' : {'US' : 'National Weather Service', 'CA' : 'Environment Canada'}, 'PLURAL' : False, 'ARTICLE' : {'US' :'THE', 'CA' : ''}},
'PEP' : {'NAME' : {'US' : 'Primary Entry Point System', 'CA' : 'Primary Entry Point System'}, 'PLURAL' : False, 'ARTICLE' : {'US' :'THE', 'CA' : 'THE'}},
'EAN' : {'NAME' : {'US' : 'Emergency Action Notification Network', 'CA' : 'Emergency Action Notification Network'}, 'PLURAL' : False, 'ARTICLE' : {'US' :'THE', 'CA' : 'THE'}},
}
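
# Illustrative sketch (an assumed helper, not the project's own API): composing
# the originator phrase from SAME__ORG. The ARTICLE field supplies 'A'/'THE'
# (or '' for Canadian WXR), and the PLURAL flag would drive verb agreement
# elsewhere ('has issued' vs. 'have issued').
def originator_phrase(org, country='US'):
    """Return e.g. 'THE National Weather Service' for ('WXR', 'US')."""
    entry = SAME__ORG.get(org)
    if not entry:
        return None
    article = entry['ARTICLE'][country]
    name = entry['NAME'][country]
    # Skip the article when it is empty, as with Environment Canada.
    return ' '.join(part for part in (article, name) if part)
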
SAME_UEEE={
'W' : 'Warning',
'A' : 'Watch',
'E' : 'Emergency',
'S' : 'Statement',
'T' : 'Test',
'M' : 'Message',
'R' : 'Warning',
'N' : 'Notification',
}
#SAME__EEE maps current and proposed event codes to their names; a lookup sketch follows the table.
SAME__EEE={
'' : None,
'BZW' : 'Blizzard Warning',
'CFA' : 'Coastal Flood Watch',
'CFW' : 'Coastal Flood Warning',
'DSW' : 'Dust Storm Warning',
'FFA' : 'Flash Flood Watch',
'FFW' : 'Flash Flood Warning',
'FFS' : 'Flash Flood Statement',
'FLA' : 'Flood Watch',
'FLW' : 'Flood Warning',
'FLS' : 'Flood Statement',
'HWA' : 'High Wind Watch',
'HWW' : 'High Wind Warning',
'HUA' : 'Hurricane Watch',
'HUW' : 'Hurricane Warning',
'HLS' : 'Hurricane Local Statement',
'SVA' : 'Severe Thunderstorm Watch',
'SVR' : 'Severe Thunderstorm Warning',
'SVS' : 'Severe Weather Statement',
'SMW' : 'Special Marine Warning',
'SPS' : 'Special Weather Statement',
'TOA' : 'Tornado Watch',
'TOR' : 'Tornado Warning',
'TRA' : 'Tropical Storm Watch',
'TRW' : 'Tropical Storm Warning',
'TSA' : 'Tsunami Watch',
'TSW' : 'Tsunami Warning',
'WSA' : 'Winter Storm Watch',
'WSW' : 'Winter Storm Warning',
'EAN' : 'Emergency Action Notification',
'EAT' : 'Emergency Action Termination',
'NIC' : 'National Information Center',
'NPT' : 'National Periodic Test',
'NAT' : 'National Audible Test',
'NST' : 'National Silent Test',
'RMT' : 'Required Monthly Test',
'RWT' : 'Required Weekly Test',
'ADR' : 'Administrative Message',
'AVA' : 'Avalanche Watch',
'AVW' : 'Avalanche Warning',
'CAE' : 'Child Abduction Emergency',
'CDW' : 'Civil Danger Warning',
'CEM' : 'Civil Emergency Message',
'EQW' : 'Earthquake Warning',
'EVI' : 'Evacuation Immediate',
'FRW' : 'Fire Warning',
'HMW' : 'Hazardous Materials Warning',
'LEW' : 'Law Enforcement Warning',
'LAE' : 'Local Area Emergency',
'TOE' : '911 Outage Emergency',
'NUW' : 'Nuclear Plant Warning',
'RHW' : 'Radiological Hazard Warning',
'SPW' : 'Shelter in Place Warning',
'VOW' : 'Volcano Warning',
'NMN' : 'Network Message Notification',
'DMO' : 'Demo Warning',
'EWW' : 'Extreme Wind Warning',
'SSA' : 'Storm Surge Watch',
'SSW' : 'Storm Surge Warning',
'FSW' : 'Flash Freeze Warning',
'FZW' : 'Freeze Warning',
'BHW' : 'Biological Hazard Warning',
'BWW' : 'Boil Water Warning',
'CHW' : 'Chemical Hazard Warning',
'CWW' : 'Contaminated Water Warning',
'DBA' : 'Dam Watch',
'DBW' : 'Dam Break Warning',
'DEW' : 'Contagious Disease Warning',
'EVA' : 'Evacuation Watch',
'FCW' : 'Food Contamination Warning',
'IBW' : 'Iceberg Warning',
'IFW' : 'Industrial Fire Warning',
'LSW' : 'Land Slide Warning',
'POS' : 'Power Outage Statement',
'WFA' : 'Wild Fire Watch',
'WFW' : 'Wild Fire Warning',
}
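
# Illustrative sketch (assumed helper, not from the original source): resolving
# an event code against SAME__EEE, and falling back to the significance letter
# (the code's last character) via SAME_UEEE when the code is unrecognized.
def event_name(eee):
    """Return a readable name for a three-letter SAME event code."""
    name = SAME__EEE.get(eee)
    if name:
        return name
    # Unknown code: classify it by its last letter, defaulting to 'Message'.
    return 'Unrecognized ' + SAME_UEEE.get(eee[-1:], 'Message')

# Example: event_name('TOR') -> 'Tornado Warning'
#          event_name('XYW') -> 'Unrecognized Warning'
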
# This list is a NOAA list of meteorological station locations cross-referenced with the FAA list of airport codes and locations.
# Without a definitive list of ICAO codes used with NWS alerts, this is the best available information. No source matches all ICAO codes in use.
ICAO_LIST={
'LOCATION' : 'US',
'K01R' : 'CLAIBORNE RANGE, LOUISIANA',
'K04V' : 'SAGUACHE, COLORADO',
'K06D' : 'ROLLA, NORTH DAKOTA',
'K07S' : 'BEAUMONT, KANSAS',
'K08D' : 'STANLEY, NORTH DAKOTA',
'K0J4' : 'FLORALA, ALABAMA',
'K0VG' : 'JONESVILLE, VIRGINIA',
'K11J' : 'ONONDAGA, MICHIGAN',
'K11R' : 'BRENHAM, TEXAS',
'K12N' : 'ANDOVER, NEW JERSEY',
'K14Y' : 'LONG PRAIRIE, MINNESOTA',
'K1A5' : 'FRANKLIN, NORTH CAROLINA',
'K1A6' : 'MIDDLESBORO, KENTUCKY',
'K1B7' : 'BOOTHVILLE, LOUISIANA',
'K1F0' : 'ARDMORE, OKLAHOMA',
'K1H2' : 'EFFINGHAM, ILLINOIS',
'K1J1' : 'SAINT SIMON ISLAND, GEORGIA',
'K1K5' : 'ELKHART, KANSAS',
'K1L2' : 'SANTA MONICA, CALIFORNIA',
'K1M4' : 'HALEYVILLE, ALABAMA',
'K1O5' : 'MONTAGUE, CALIFORNIA',
'K1P1' : 'PLYMOUTH, NEW HAMPSHIRE',
'K1V1' : 'RIFLE, COLORADO',
'K1V4' : 'ST JOHNSBURY, VERMONT',
'K20C' : 'SAINT JOSEPH, MICHIGAN',
'K20U' : 'BEACH, NORTH DAKOTA',
'K20V' : 'KREMMLING, COLORADO',
'K21D' : 'LAKE ELMO, MINNESOTA',
'K27A' : 'ELBERT, GEORGIA',
'K29G' : 'RAVENNA, OHIO',
'K2C8' : 'CAVALIER, NORTH DAKOTA',
'K2D5' : 'OAKES, NORTH DAKOTA',
'K2DP' : 'ENGELHARD, NORTH CAROLINA',
'K2G6' : 'MEADVILLE, PENNSYLVANIA',
'K2S9' : 'WILLAPA HARBOR, WASHINGTON',
'K2U7' : 'STANLEY, IDAHO',
'K33G' : 'PORT HURON, MICHIGAN',
'K36U' : 'HEBER CITY, UTAH',
'K3A1' : 'CULLMAN, ALABAMA',
'K3A6' : 'STEVENSON RANCH, CALIFORNIA',
'K3B1' : 'GREENVILLE, MAINE',
'K3I2' : 'POINT PLEASANT, WEST VIRGINIA',
'K3J7' : 'GREENSBORO, GEORGIA',
'K3LF' : 'LITCHFIELD, ILLINOIS',
'K3OI' : 'LAMONI, IOWA',
'K3R1' : 'BAY CITY, TEXAS',
'K3S2' : 'AURORA STATE, OREGON',
'K3T5' : 'LA GRANGE, TEXAS',
'K40B' : 'CLAYTON LAKE, MAINE',
'K40G' : 'VALLE, ARIZONA',
'K40J' : 'PERRY, FLORIDA',
'K41G' : 'BATH, MICHIGAN',
'K44N' : 'MILLBROOK, NEW YORK',
'K46D' : 'CARRINGTON, NORTH DAKOTA',
'K47A' : 'CANTON, GEORGIA',
'K48I' : 'SUTTON, WEST VIRGINIA',
'K49N' : 'EAST MORICHES, NEW YORK',
'K4A9' : 'FORT PAYNE, ALABAMA',
'K4O4' : 'IDABEL, OKLAHOMA',
'K4SL' : 'STAR LAKE JOHNSON RANCH, NEW MEXICO',
'K5H4' : 'HARVEY, NORTH DAKOTA',
'K5T6' : 'SANTA TERESA, NEW MEXICO',
'K6R6' : 'DRYDEN, TEXAS',
'K75S' : 'FREDONIA, WASHINGTON',
'K76S' : 'PENN COVE PARK, WASHINGTON',
'K79J' : 'ANDALUSIA, ALABAMA',
'K7A9' : 'PLAINS, GEORGIA',
'K7L2' : 'LINTON, NORTH DAKOTA',
'K8A0' : 'ALBERTVILLE, ALABAMA',
'K8D3' : 'SISSETON, SOUTH DAKOTA',
'K8S0' : 'KIOWA, MONTANA',
'K96D' : 'WALHALLA, NORTH DAKOTA',
'K96S' : 'DUNGENESS, WASHINGTON',
'K9BB' : 'WELLS, NEVADA',
'K9D7' : 'CANDO, NORTH DAKOTA',
'K9L2' : 'EDWARDS AFB, CALIFORNIA',
'K9V9' : 'CHAMBERLAIN, SOUTH DAKOTA',
'KAAA' : 'LINCOLN, ILLINOIS',
'KAAF' : 'APALACHICOLA, FLORIDA',
'KAAO' : 'WICHITA, KANSAS',
'KAAT' : 'ALTURAS, CALIFORNIA',
'KABE' : 'ALLENTOWN, PENNSYLVANIA',
'KABI' : 'ABILENE, TEXAS',
'KABQ' : 'ALBUQUERQUE, NEW MEXICO',
'KABR' : 'ABERDEEN, SOUTH DAKOTA',
'KABX' : 'ALBUQUERQUE, NEW MEXICO',
'KABY' : 'ALBANY, GEORGIA',
'KACB' : 'BELLAIRE, MICHIGAN',
'KACK' : 'NANTUCKET, MASSACHUSETTS',
'KACP' : 'OAKDALE, LOUISIANA',
'KACQ' : 'WASECA, MINNESOTA',
'KACT' : 'WACO, TEXAS',
'KACV' : 'ARCATA, CALIFORNIA',
'KACY' : 'ATLANTIC CITY, NEW JERSEY',
'KADC' : 'WADENA, MINNESOTA',
'KADG' : 'ADRIAN, MICHIGAN',
'KADH' : 'ADA, OKLAHOMA',
'KADM' : 'ARDMORE, OKLAHOMA',
'KADS' : 'DALLAS, TEXAS',
'KADU' : 'AUDUBON, IOWA',
'KADW' : 'CAMP SPRINGS, MARYLAND',
'KAEG' : 'ALBUQUERQUE, NEW MEXICO',
'KAEJ' : 'BUENA VISTA, COLORADO',
'KAEL' : 'ALBERT LEA, MINNESOTA',
'KAEX' : 'ALEXANDRIA, LOUISIANA',
'KAFF' : 'COLORADO SPRINGS, COLORADO',
'KAFJ' : 'WASHINGTON, PENNSYLVANIA',
'KAFK' : 'NEBRASKA CITY, NEBRASKA',
'KAFN' : 'JAFFREY, NEW HAMPSHIRE',
'KAFP' : 'WADESBORO, NORTH CAROLINA',
'KAFW' : 'FORT WORTH, TEXAS',
'KAGC' : 'PITTSBURGH, PENNSYLVANIA',
'KAGR' : 'AVON PARK, FLORIDA',
'KAGS' : 'AUGUSTA, GEORGIA',
'KAHN' : 'ATHENS, GEORGIA',
'KAIA' : 'ALLIANCE, NEBRASKA',
'KAID' : 'ANDERSON, INDIANA',
'KAIG' : 'ANTIGO, WISCONSIN',
'KAIO' : 'ATLANTIC, IOWA',
'KAIT' : 'AITKIN, MINNESOTA',
'KAIZ' : 'LAKE OZARK, MISSOURI',
'KAJG' : 'MOUNT CARMEL, ILLINOIS',
'KAKH' : 'GASTONIA, NORTH CAROLINA',
'KAKO' : 'AKRON, COLORADO',
'KAKQ' : 'WAKEFIELD, VIRGINIA',
'KAKR' : 'AKRON, OHIO',
'KALB' : 'ALBANY, NEW YORK',
'KALI' : 'ALICE, TEXAS',
'KALM' : 'ALAMOGORDO, NEW MEXICO',
'KALN' : 'ALTON, ILLINOIS',
'KALO' : 'WATERLOO, IOWA',
'KALS' : 'ALAMOSA, COLORADO',
'KALW' : 'WALLA WALLA, WASHINGTON',
'KALX' : 'ALEXANDER CITY, ALABAMA',
'KAMA' : 'AMARILLO, TEXAS',
'KAMG' : 'ALMA, GEORGIA',
'KAMN' : 'ALMA, MICHIGAN',
'KAMW' : 'AMES, IOWA',
'KANB' : 'ANNISTON, ALABAMA',
'KAND' : 'ANDERSON, SOUTH CAROLINA',
'KANE' : 'MINNEAPOLIS, MINNESOTA',
'KANJ' : 'SAULT STE MARIE, MICHIGAN',
'KANW' : 'AINSWORTH, NEBRASKA',
'KAOH' : 'LIMA, OHIO',
'KAOO' : 'ALTOONA, PENNSYLVANIA',
'KAPA' : 'DENVER, COLORADO',
'KAPC' : 'NAPA, CALIFORNIA',
'KAPF' : 'NAPLES, FLORIDA',
'KAPG' : 'ABERDEEN PROVING GROUND, MARYLAND',
'KAPN' : 'ALPENA, MICHIGAN',
'KAPV' : 'APPLE VALLEY, CALIFORNIA',
'KAQO' : 'LLANO, TEXAS',
'KAQP' : 'APPLETON, MINNESOTA',
'KAQR' : 'ATOKA, OKLAHOMA',
'KAQV' : 'FORT POLK, LOUISIANA',
'KAQW' : 'NORTH ADAMS, MASSACHUSETTS',
'KARA' : 'NEW IBERIA, LOUISIANA',
'KARB' : 'ANN ARBOR, MICHIGAN',
'KARG' : 'WALNUT RIDGE, ARKANSAS',
'KARM' : 'WHARTON, TEXAS',
'KARR' : 'CHICAGO, ILLINOIS',
'KART' : 'WATERTOWN, NEW YORK',
'KARV' : 'MINOCQUA, WISCONSIN',
'KASD' : 'SLIDELL, LOUISIANA',
'KASE' : 'ASPEN, COLORADO',
'KASG' : 'SPRINGDALE, ARKANSAS',
'KASH' : 'NASHUA, NEW HAMPSHIRE',
'KASJ' : 'AHOSKIE, NORTH CAROLINA',
'KAST' : 'ASTORIA, OREGON',
'KASW' : 'WARSAW, INDIANA',
'KASX' : 'ASHLAND, WISCONSIN',
'KATL' : 'ATLANTA, GEORGIA',
'KATS' : 'ARTESIA, NEW MEXICO',
'KATT' : 'AUSTIN, TEXAS',
'KATW' : 'APPLETON, WISCONSIN',
'KATY' : 'WATERTOWN, SOUTH DAKOTA',
'KAUG' : 'AUGUSTA, MAINE',
'KAUH' : 'AURORA, NEBRASKA',
'KAUM' : 'AUSTIN, MINNESOTA',
'KAUN' : 'AUBURN, CALIFORNIA',
'KAUO' : 'AUBURN, ALABAMA',
'KAUS' : 'AUSTIN, TEXAS',
'KAUW' : 'WAUSAU, WISCONSIN',
'KAVC' : 'SOUTH HILL, VIRGINIA',
'KAVK' : 'ALVA, OKLAHOMA',
'KAVL' : 'ASHEVILLE, NORTH CAROLINA',
'KAVP' : 'SCRANTON, PENNSYLVANIA',
'KAVX' : 'AVALON, CALIFORNIA',
'KAWG' : 'WASHINGTON, IOWA',
'KAWM' : 'WEST MEMPHIS, ARKANSAS',
'KAWO' : 'ARLINGTON, WASHINGTON',
'KAXA' : 'ALGONA, IOWA',
'KAXN' : 'ALEXANDRIA, MINNESOTA',
'KAXS' : 'ALTUS, OKLAHOMA',
'KAYE' : 'AYER, MASSACHUSETTS',
'KAYS' : 'WAYCROSS, GEORGIA',
'KAZC' : 'COLORADO CITY, ARIZONA',
'KAZO' : 'KALAMAZOO, MICHIGAN',
'KBAB' : 'MARYSVILLE, CALIFORNIA',
'KBAC' : 'VALLEY CITY, NORTH DAKOTA',
'KBAD' : 'SHREVEPORT, LOUISIANA',
'KBAF' : 'WESTFIELD, MASSACHUSETTS',
'KBAK' : 'COLUMBUS, INDIANA',
'KBAN' : 'BRIDGEPORT, CALIFORNIA',
'KBAX' : 'BAD AXE, MICHIGAN',
'KBAZ' : 'NEW BRAUNFELS, TEXAS',
'KBBB' : 'BENSON, MINNESOTA',
'KBBD' : 'BRADY, TEXAS',
'KBBW' : 'BROKEN BOW, NEBRASKA',
'KBCB' : 'BLACKSBURG, VIRGINIA',
'KBCE' : 'BRYCE CANYON, UTAH',
'KBCT' : 'BOCA RATON, FLORIDA',
'KBDE' : 'BAUDETTE, MINNESOTA',
'KBDL' : 'WINDSOR LOCKS, CONNECTICUT',
'KBDR' : 'BRIDGEPORT, CONNECTICUT',
'KBED' : 'BEDFORD, MASSACHUSETTS',
'KBEH' : 'BENTON HARBOR, MICHIGAN',
'KBFD' : 'BRADFORD, PENNSYLVANIA',
'KBFF' : 'SCOTTSBLUFF, NEBRASKA',
'KBFI' : 'SEATTLE, WASHINGTON',
'KBFL' : 'BAKERSFIELD, CALIFORNIA',
'KBFM' : 'MOBILE, ALABAMA',
'KBFW' : 'SILVER BAY, MINNESOTA',
'KBGD' : 'BORGER, TEXAS',
'KBGE' : 'BAINBRIDGE, GEORGIA',
'KBGM' : 'BINGHAMTON, NEW YORK',
'KBGR' : 'BANGOR, MAINE',
'KBHB' : 'BAR HARBOR, MAINE',
'KBHK' : 'BAKER, MONTANA',
'KBHM' : 'BIRMINGHAM, ALABAMA',
'KBID' : 'BLOCK ISLAND, RHODE ISLAND',
'KBIE' : 'BEATRICE, NEBRASKA',
'KBIF' : 'EL PASO, TEXAS',
'KBIH' : 'BISHOP, CALIFORNIA',
'KBIL' : 'BILLINGS, MONTANA',
'KBIS' : 'BISMARCK, NORTH DAKOTA',
'KBIV' : 'HOLLAND, MICHIGAN',
'KBIX' : 'BILOXI, MISSISSIPPI',
'KBJC' : 'DENVER, COLORADO',
'KBJI' : 'BEMIDJI, MINNESOTA',
'KBJJ' : 'WOOSTER, OHIO',
'KBJN' : 'TONOPAH TEST RANGE, NEVADA',
'KBKB' : 'FORT POLK, LOUISIANA',
'KBKE' : 'BAKER CITY, OREGON',
'KBKF' : 'DENVER, COLORADO',
'KBKL' : 'CLEVELAND, OHIO',
'KBKS' : 'FALFURRIAS, TEXAS',
'KBKT' : 'BLACKSTONE, VIRGINIA',
'KBKV' : 'BROOKSVILLE, FLORIDA',
'KBKW' : 'BECKLEY, WEST VIRGINIA',
'KBKX' : 'BROOKINGS, SOUTH DAKOTA',
'KBLF' : 'BLUEFIELD, WEST VIRGINIA',
'KBLH' : 'BLYTHE, CALIFORNIA',
'KBLI' : 'BELLINGHAM, WASHINGTON',
'KBLM' : 'BELMAR, NEW JERSEY',
'KBLU' : 'EMIGRANT GAP, CALIFORNIA',
'KBLV' : 'BELLEVILLE, ILLINOIS',
'KBMG' : 'BLOOMINGTON, INDIANA',
'KBMI' : 'BLOOMINGTON-NORMAL, ILLINOIS',
'KBML' : 'BERLIN, NEW HAMPSHIRE',
'KBMQ' : 'BURNET, TEXAS',
'KBMX' : 'BIRMINGHAM, ALABAMA',
'KBNA' : 'NASHVILLE, TENNESSEE',
'KBNO' : 'BURNS, OREGON',
'KBNW' : 'BOONE, IOWA',
'KBOI' : 'BOISE, IDAHO',
'KBOK' : 'BROOKINGS, OREGON',
'KBOS' : 'BOSTON, MASSACHUSETTS',
'KBOW' : 'BARTOW, FLORIDA',
'KBOX' : 'BOSTON, MASSACHUSETTS',
'KBPG' : 'BIG SPRING, TEXAS',
'KBPI' : 'BIG PINEY, WYOMING',
'KBPK' : 'MOUNTAIN HOME, ARKANSAS',
'KBPP' : 'BOWMAN, NORTH DAKOTA',
'KBPT' : 'BEAUMONT, TEXAS',
'KBQK' : 'BRUNSWICK, GEORGIA',
'KBRD' : 'BRAINERD, MINNESOTA',
'KBRL' : 'BURLINGTON, IOWA',
'KBRO' : 'BROWNSVILLE, TEXAS',
'KBRX' : 'BORDEAUX, WYOMING',
'KBTL' : 'BATTLE CREEK, MICHIGAN',
'KBTM' : 'BUTTE, MONTANA',
'KBTP' : 'BUTLER, PENNSYLVANIA',
'KBTR' : 'BATON ROUGE, LOUISIANA',
'KBTV' : 'BURLINGTON, VERMONT',
'KBUF' : 'BUFFALO, NEW YORK',
'KBUR' : 'BURBANK, CALIFORNIA',
'KBUU' : 'BURLINGTON, WISCONSIN',
'KBUY' : 'BURLINGTON, NORTH CAROLINA',
'KBVE' : 'BOOTHVILLE, LOUISIANA',
'KBVI' : 'BEAVER FALLS, PENNSYLVANIA',
'KBVN' : 'ALBION, NEBRASKA',
'KBVO' : 'BARTLESVILLE, OKLAHOMA',
'KBVS' : 'BURLINGTON, WASHINGTON',
'KBVX' : 'BATESVILLE, ARKANSAS',
'KBVY' : 'BEVERLY, MASSACHUSETTS',
'KBWD' : 'BROWNWOOD, TEXAS',
'KBWG' : 'BOWLING GREEN, KENTUCKY',
'KBWI' : 'BALTIMORE, MARYLAND',
'KBWP' : 'WAHPETON, NORTH DAKOTA',
'KBXA' : 'BOGALUSA, LOUISIANA',
'KBYG' : 'BUFFALO, WYOMING',
'KBYH' : 'BLYTHEVILLE, ARKANSAS',
'KBYI' : 'BURLEY, IDAHO',
'KBYS' : 'FORT IRWIN, CALIFORNIA',
'KBYY' : 'BAY CITY, TEXAS',
'KBZN' : 'BOZEMAN, MONTANA',
'KC09' : 'SARATOGA, ILLINOIS',
'KC73' : 'DIXON, ILLINOIS',
'KC75' : 'SPARLAND, ILLINOIS',
'KCAD' : 'CADILLAC, MICHIGAN',
'KCAE' : 'COLUMBIA, SOUTH CAROLINA',
'KCAG' : 'CRAIG, COLORADO',
'KCAK' : 'AKRON, OHIO',
'KCAO' : 'CLAYTON, NEW MEXICO',
'KCAR' : 'CARIBOU, MAINE',
'KCAV' : 'CLARION, IOWA',
'KCBE' : 'CUMBERLAND, MARYLAND',
'KCBF' : 'COUNCIL BLUFFS, IOWA',
'KCBG' : 'CAMBRIDGE, MINNESOTA',
'KCBM' : 'COLUMBUS, MISSISSIPPI',
'KCCO' : 'ATLANTA, GEORGIA',
'KCCR' : 'CONCORD, CALIFORNIA',
'KCCU' : 'COPPER MOUNTAIN, COLORADO',
'KCCY' : 'CHARLES CITY, IOWA',
'KCDC' : 'CEDAR CITY, UTAH',
'KCDD' : 'CRANE LAKE, MINNESOTA',
'KCDH' : 'CAMDEN, ARKANSAS',
'KCDJ' : 'CHILLICOTHE, MISSOURI',
'KCDR' : 'CHADRON, NEBRASKA',
'KCDS' : 'CHILDRESS, TEXAS',
'KCDW' : 'CALDWELL, NEW JERSEY',
'KCEC' : 'CRESCENT CITY, CALIFORNIA',
'KCEF' : 'SPRINGFIELD, MASSACHUSETTS',
'KCEU' : 'CLEMSON, SOUTH CAROLINA',
'KCEW' : 'CRESTVIEW, FLORIDA',
'KCEZ' : 'CORTEZ, COLORADO',
'KCFE' : 'BUFFALO, MINNESOTA',
'KCFS' : 'CARO, MICHIGAN',
'KCFT' : 'CLIFTON, ARIZONA',
'KCFV' : 'COFFEYVILLE, KANSAS',
'KCGF' : 'CLEVELAND, OHIO',
'KCGI' : 'CAPE GIRARDEAU, MISSOURI',
'KCGX' : 'CHICAGO, ILLINOIS',
'KCGZ' : 'CASA GRANDE, ARIZONA',
'KCHA' : 'CHATTANOOGA, TENNESSEE',
'KCHD' : 'CHANDLER, ARIZONA',
'KCHI' : 'CHICAGO, ILLINOIS',
'KCHK' : 'CHICKASHA, OKLAHOMA',
'KCHO' : 'CHARLOTTESVILLE, VIRGINIA',
'KCHS' : 'CHARLESTON, SOUTH CAROLINA',
'KCIC' : 'CHICO, CALIFORNIA',
'KCID' : 'CEDAR RAPIDS, IOWA',
'KCIN' : 'CARROLL, IOWA',
'KCIR' : 'CAIRO, ILLINOIS',
'KCIU' : 'SAULT STE MARIE, MICHIGAN',
'KCJR' : 'CULPEPER, VIRGINIA',
'KCKB' : 'CLARKSBURG, WEST VIRGINIA',
'KCKC' : 'GRAND MARAIS, MINNESOTA',
'KCKN' : 'CROOKSTON, MINNESOTA',
'KCKP' : 'CHEROKEE, IOWA',
'KCKV' : 'CLARKSVILLE, TENNESSEE',
'KCLE' : 'CLEVELAND, OHIO',
'KCLI' : 'CLINTONVILLE, WISCONSIN',
'KCLK' : 'CLINTON, OKLAHOMA',
'KCLL' : 'COLLEGE STATION, TEXAS',
'KCLM' : 'PORT ANGELES, WASHINGTON',
'KCLT' : 'CHARLOTTE, NORTH CAROLINA',
'KCMA' : 'CAMARILLO, CALIFORNIA',
'KCMH' : 'COLUMBUS, OHIO',
'KCMI' : 'CHAMPAIGN-URBANA, ILLINOIS',
'KCMX' : 'HANCOCK, MICHIGAN',
'KCMY' : 'SPARTA, WISCONSIN',
'KCNB' : 'CANBY, MINNESOTA',
'KCNC' : 'CHARITON, IOWA',
'KCNK' : 'CONCORDIA, KANSAS',
'KCNM' : 'CARLSBAD, NEW MEXICO',
'KCNO' : 'CHINO, CALIFORNIA',
'KCNU' : 'CHANUTE, KANSAS',
'KCNY' : 'MOAB, UTAH',
'KCOD' : 'CODY, WYOMING',
'KCOE' : 'COEUR D\'ALENE, IDAHO',
'KCOF' : 'COCOA BEACH, FLORIDA',
'KCOI' : 'MERRITT ISLAND, FLORIDA',
'KCON' : 'CONCORD, NEW HAMPSHIRE',
'KCOQ' : 'CLOQUET, MINNESOTA',
'KCOS' : 'COLORADO SPRINGS, COLORADO',
'KCOT' : 'COTULLA, TEXAS',
'KCOU' : 'COLUMBIA, MISSOURI',
'KCPC' : 'WHITEVILLE, NORTH CAROLINA',
'KCPK' : 'NORFOLK, VIRGINIA',
'KCPR' : 'CASPER, WYOMING',
'KCPS' : 'CAHOKIA, ILLINOIS',
'KCPT' : 'CLEBURNE, TEXAS',
'KCPW' : 'WOLF CREEK PASS, COLORADO',
'KCQB' : 'CHANDLER, OKLAHOMA',
'KCQC' : 'CLINES CORNER, NEW MEXICO',
'KCQM' : 'COOK, MINNESOTA',
'KCQT' : 'LOS ANGELES, CALIFORNIA',
'KCQX' : 'CHATHAM, MASSACHUSETTS',
'KCRE' : 'NORTH MYRTLE BEACH, SOUTH CAROLINA',
'KCRG' : 'JACKSONVILLE, FLORIDA',
'KCRP' : 'CORPUS CHRISTI, TEXAS',
'KCRQ' : 'CARLSBAD, CALIFORNIA',
'KCRS' : 'CORSICANA, TEXAS',
'KCRW' : 'CHARLESTON, WEST VIRGINIA',
'KCSG' : 'COLUMBUS, GEORGIA',
'KCSL' : 'SAN LUIS OBISPO, CALIFORNIA',
'KCSM' : 'CLINTON, OKLAHOMA',
'KCSQ' : 'CRESTON, IOWA',
'KCSV' : 'CROSSVILLE, TENNESSEE',
'KCTB' : 'CUT BANK, MONTANA',
'KCTY' : 'CROSS CITY, FLORIDA',
'KCTZ' : 'CLINTON, NORTH CAROLINA',
'KCUB' : 'COLUMBIA, SOUTH CAROLINA',
'KCUH' : 'CUSHING, OKLAHOMA',
'KCUL' : 'CARMI, ILLINOIS',
'KCUT' : 'CUSTER, SOUTH DAKOTA',
'KCVG' : 'COVINGTON, KENTUCKY',
'KCVN' : 'CLOVIS, NEW MEXICO',
'KCVO' : 'CORVALLIS, OREGON',
'KCVS' : 'CLOVIS, NEW MEXICO',
'KCVX' : 'CHARLEVOIX, MICHIGAN',
'KCWA' : 'MOSINEE, WISCONSIN',
'KCWF' : 'LAKE CHARLES, LOUISIANA',
'KCWI' : 'CLINTON, IOWA',
'KCXO' : 'HOUSTON, TEXAS',
'KCXP' : 'CARSON CITY, NEVADA',
'KCXY' : 'HARRISBURG, PENNSYLVANIA',
'KCYS' : 'CHEYENNE, WYOMING',
'KCZD' : 'COZAD, NEBRASKA',
'KCZK' : 'CASCADE LOCKS, OREGON',
'KCZZ' : 'CAMPO, CALIFORNIA',
'KD07' : 'FAITH, SOUTH DAKOTA',
'KD50' : 'CROSBY, NORTH DAKOTA',
'KD55' : 'LANGDON, NORTH DAKOTA',
'KD60' : 'TIOGA, NORTH DAKOTA',
'KDAA' : 'FORT BELVOIR, VIRGINIA',
'KDAB' : 'DAYTONA BEACH, FLORIDA',
'KDAG' : 'DAGGETT, CALIFORNIA',
'KDAL' : 'DALLAS, TEXAS',
'KDAN' : 'DANVILLE, VIRGINIA',
'KDAW' : 'ROCHESTER, NEW HAMPSHIRE',
'KDAY' : 'DAYTON, OHIO',
'KDBN' : 'DUBLIN, GEORGIA',
'KDBQ' : 'DUBUQUE, IOWA',
'KDCA' : 'WASHINGTON, DC',
'KDCU' : 'DECATUR, ALABAMA',
'KDDC' : 'DODGE CITY, KANSAS',
'KDDH' : 'BENNINGTON, VERMONT',
'KDEC' : 'DECATUR, ILLINOIS',
'KDEH' : 'DECORAH, IOWA',
'KDEN' : 'DENVER, COLORADO',
'KDEQ' : 'DE QUEEN, ARKANSAS',
'KDET' : 'DETROIT, MICHIGAN',
'KDEW' : 'DEER PARK, WASHINGTON',
'KDFI' : 'DEFIANCE, OHIO',
'KDFW' : 'DALLAS-FORT WORTH, TEXAS',
'KDGW' : 'DOUGLAS, WYOMING',
'KDHN' : 'DOTHAN, ALABAMA',
'KDHT' : 'DALHART, TEXAS',
'KDIK' : 'DICKINSON, NORTH DAKOTA',
'KDKB' : 'DE KALB, ILLINOIS',
'KDKK' : 'DUNKIRK, NEW YORK',
'KDKR' : 'CROCKETT, TEXAS',
'KDKX' : 'KNOXVILLE, TENNESSEE',
'KDLF' : 'DEL RIO, TEXAS',
'KDLH' : 'DULUTH, MINNESOTA',
'KDLL' : 'BARABOO, WISCONSIN',
'KDLN' : 'DILLON, MONTANA',
'KDLS' : 'THE DALLES, OREGON',
'KDMA' : 'TUCSON, ARIZONA',
'KDMH' : 'BALTIMORE, MARYLAND',
'KDMN' : 'DEMING, NEW MEXICO',
'KDMO' : 'SEDALIA, MISSOURI',
'KDNK' : 'POLK AAF, LOUISIANA',
'KDNL' : 'AUGUSTA, GEORGIA',
'KDNN' : 'DALTON, GEORGIA',
'KDNR' : 'DENVER, COLORADO',
'KDNS' : 'DENISON, IOWA',
'KDNV' : 'DANVILLE, ILLINOIS',
'KDOV' : 'DOVER, DELAWARE',
'KDPA' : 'CHICAGO, ILLINOIS',
'KDPG' : 'DUGWAY, UTAH',
'KDPL' : 'KENANSVILLE, NORTH CAROLINA',
'KDQH' : 'DOUGLAS, GEORGIA',
'KDRA' : 'MERCURY, NEVADA',
'KDRI' : 'DE RIDDER, LOUISIANA',
'KDRO' : 'DURANGO, COLORADO',
'KDRT' : 'DEL RIO, TEXAS',
'KDSM' : 'DES MOINES, IOWA',
'KDSV' : 'DANSVILLE, NEW YORK',
'KDTL' : 'DETROIT LAKES, MINNESOTA',
'KDTN' : 'SHREVEPORT, LOUISIANA',
'KDTO' : 'DENTON, TEXAS',
'KDTS' : 'DESTIN, FLORIDA',
'KDTW' : 'DETROIT, MICHIGAN',
'KDTX' : 'DETROIT, MICHIGAN',
'KDUA' : 'DURANT, OKLAHOMA',
'KDUC' : 'DUNCAN, OKLAHOMA',
'KDUG' : 'DOUGLAS BISBEE, ARIZONA',
'KDUH' : 'LAMBERTVILLE, MICHIGAN',
'KDUJ' : 'DUBOIS, PENNSYLVANIA',
'KDUX' : 'DUMAS, TEXAS',
'KDVL' : 'DEVILS LAKE, NORTH DAKOTA',
'KDVN' : 'DAVENPORT, IOWA',
'KDVP' : 'SLAYTON, MINNESOTA',
'KDVT' : 'PHOENIX, ARIZONA',
'KDWH' : 'HOUSTON, TEXAS',
'KDXR' : 'DANBURY, CONNECTICUT',
'KDXX' : 'MADISON, MINNESOTA',
'KDYL' : 'DOYLESTOWN, PENNSYLVANIA',
'KDYR' : 'DYERSBURG, TENNESSEE',
'KDYS' : 'ABILENE, TEXAS',
'KDYT' : 'DULUTH, MINNESOTA',
'KE24' : 'FT APACHE, ARIZONA',
'KE33' : 'CHAMA, NEW MEXICO',
'KE38' : 'ALPINE, TEXAS',
'KE80' : 'BELEN, NEW MEXICO',
'KEAR' : 'KEARNEY, NEBRASKA',
'KEAT' : 'WENATCHEE, WASHINGTON',
'KEAU' : 'EAU CLAIRE, WISCONSIN',
'KEAX' : 'PLEASANT HILL, MISSOURI',
'KEBG' : 'EDINBURG, TEXAS',
'KEBS' : 'WEBSTER CITY, IOWA',
'KECG' : 'ELIZABETH CITY, NORTH CAROLINA',
'KECP' : 'PANAMA CITY, FLORIDA',
'KECU' : 'ROCKSPRINGS, TEXAS',
'KEDE' : 'EDENTON, NORTH CAROLINA',
'KEDW' : 'EDWARDS AFB, CALIFORNIA',
'KEED' : 'NEEDLES, CALIFORNIA',
'KEEN' : 'KEENE, NEW HAMPSHIRE',
'KEEO' : 'MEEKER, COLORADO',
'KEET' : 'ALABASTER, ALABAMA',
'KEFC' : 'BELLE FOURCHE, SOUTH DAKOTA',
'KEFD' : 'HOUSTON, TEXAS',
'KEFT' : 'MONROE, WISCONSIN',
'KEGE' : 'EAGLE, COLORADO',
'KEGI' : 'DUKE FLD, FLORIDA',
'KEGV' : 'EAGLE RIVER, WISCONSIN',
'KEHA' : 'ELKHART, KANSAS',
'KEHO' : 'SHELBY, NORTH CAROLINA',
'KEHR' : 'HENDERSON, KENTUCKY',
'KEKA' : 'EUREKA, CALIFORNIA',
'KEKM' : 'ELKHART, INDIANA',
'KEKN' : 'ELKINS, WEST VIRGINIA',
'KEKO' : 'ELKO, NEVADA',
'KEKQ' : 'MONTICELLO, KENTUCKY',
'KELD' : 'EL DORADO, ARKANSAS',
'KELM' : 'ELMIRA, NEW YORK',
'KELN' : 'ELLENSBURG, WASHINGTON',
'KELO' : 'ELY, MINNESOTA',
'KELP' : 'EL PASO, TEXAS',
'KELY' : 'ELY, NEVADA',
'KELZ' : 'WELLSVILLE, NEW YORK',
'KEMP' : 'EMPORIA, KANSAS',
'KEMT' : 'EL MONTE, CALIFORNIA',
'KEMV' : 'EMPORIA, VIRGINIA',
'KEND' : 'ENID, OKLAHOMA',
'KENL' : 'CENTRALIA, ILLINOIS',
'KENV' : 'WENDOVER, UTAH',
'KENW' : 'KENOSHA, WISCONSIN',
'KEOK' : 'KEOKUK, IOWA',
'KEPH' : 'EPHRATA, WASHINGTON',
'KEPZ' : 'SANTA TERESA, NEW MEXICO',
'KEQY' : 'MONROE, NORTH CAROLINA',
'KERI' : 'ERIE, PENNSYLVANIA',
'KERV' : 'KERRVILLE, TEXAS',
'KERY' : 'NEWBERRY, MICHIGAN',
'KESC' : 'ESCANABA, MICHIGAN',
'KESF' : 'ALEXANDRIA, LOUISIANA',
'KESN' : 'EASTON, MARYLAND',
'KEST' : 'ESTHERVILLE, IOWA',
'KETB' : 'WEST BEND, WISCONSIN',
'KETH' : 'WHEATON, MINNESOTA',
'KEUF' : 'EUFAULA, ALABAMA',
'KEUG' : 'EUGENE, OREGON',
'KEUL' : 'CALDWELL, IDAHO',
'KEVB' : 'NEW SMYRNA BEACH, FLORIDA',
'KEVM' : 'EVELETH, MINNESOTA',
'KEVV' : 'EVANSVILLE, INDIANA',
'KEVW' : 'EVANSTON, WYOMING',
'KEWB' : 'NEW BEDFORD, MASSACHUSETTS',
'KEWK' : 'NEWTON, KANSAS',
'KEWN' : 'NEW BERN, NORTH CAROLINA',
'KEWR' : 'NEWARK, NEW JERSEY',
'KEXX' : 'LEXINGTON, NORTH CAROLINA',
'KEYE' : 'INDIANAPOLIS, INDIANA',
'KEYF' : 'ELIZABETHTOWN, NORTH CAROLINA',
'KEYW' : 'KEY WEST, FLORIDA',
'KEZF' : 'FREDERICKSBURG, VIRGINIA',
'KEZM' : 'EASTMAN, GEORGIA',
'KF05' : 'VERNON, TEXAS',
'KF10' : 'HENRYETTA, OKLAHOMA',
'KF30' : 'SULPHUR, OKLAHOMA',
'KFAF' : 'FORT EUSTIS, VIRGINIA',
'KFAM' : 'FARMINGTON, MISSOURI',
'KFAR' : 'FARGO, NORTH DAKOTA',
'KFAT' : 'FRESNO, CALIFORNIA',
'KFAY' : 'FAYETTEVILLE, NORTH CAROLINA',
'KFBG' : 'FORT BRAGG, NORTH CAROLINA',
'KFBL' : 'FARIBAULT, MINNESOTA',
'KFCH' : 'FRESNO, CALIFORNIA',
'KFCM' : 'MINNEAPOLIS, MINNESOTA',
'KFCS' : 'FORT CARSON, COLORADO',
'KFDK' : 'FREDERICK, MARYLAND',
'KFDR' : 'FREDERICK, OKLAHOMA',
'KFDY' : 'FINDLAY, OHIO',
'KFEP' : 'FREEPORT, ILLINOIS',
'KFET' : 'FREMONT, NEBRASKA',
'KFFA' : 'KILL DEVIL HILLS, NORTH CAROLINA',
'KFFC' : 'ATLANTA, GEORGIA',
'KFFL' : 'FAIRFIELD, IOWA',
'KFFM' : 'FERGUS FALLS, MINNESOTA',
'KFFO' : 'DAYTON, OHIO',
'KFFT' : 'FRANKFORT, KENTUCKY',
'KFFZ' : 'MESA, ARIZONA',
'KFGN' : 'FLAG ISLAND, MINNESOTA',
'KFHR' : 'FRIDAY HARBOR, WASHINGTON',
'KFHU' : 'FORT HUACHUCA SIERRA VISTA, ARIZONA',
'KFIG' : 'CLEARFIELD, PENNSYLVANIA',
'KFIT' : 'FITCHBURG, MASSACHUSETTS',
'KFKA' : 'PRESTON, MINNESOTA',
'KFKL' : 'FRANKLIN, PENNSYLVANIA',
'KFKN' : 'FRANKLIN, VIRGINIA',
'KFKS' : 'FRANKFORT, MICHIGAN',
'KFLD' : 'FOND DU LAC, WISCONSIN',
'KFLG' : 'FLAGSTAFF, ARIZONA',
'KFLL' : 'FORT LAUDERDALE, FLORIDA',
'KFLO' : 'FLORENCE, SOUTH CAROLINA',
'KFLP' : 'FLIPPIN, ARKANSAS',
'KFLV' : 'FORT LEAVENWORTH, KANSAS',
'KFME' : 'FORT MEADE (ODENTON), MARYLAND',
'KFMH' : 'FALMOUTH, MASSACHUSETTS',
'KFMN' : 'FARMINGTON, NEW MEXICO',
'KFMY' : 'FORT MYERS, FLORIDA',
'KFNB' : 'FALLS CITY, NEBRASKA',
'KFNL' : 'FORT COLLINS, COLORADO',
'KFNT' : 'FLINT, MICHIGAN',
'KFOA' : 'FLORA, ILLINOIS',
'KFOD' : 'FORT DODGE, IOWA',
'KFOE' : 'TOPEKA, KANSAS',
'KFOK' : 'WESTHAMPTON BEACH, NEW YORK',
'KFOZ' : 'BIGFORK, MINNESOTA',
'KFPK' : 'CHARLOTTE, MICHIGAN',
'KFPR' : 'FORT PIERCE, FLORIDA',
'KFQD' : 'RUTHERFORDTON, NORTH CAROLINA',
'KFRG' : 'FARMINGDALE, NEW YORK',
'KFRI' : 'FORT RILEY, KANSAS',
'KFRM' : 'FAIRMONT, MINNESOTA',
'KFSD' : 'SIOUX FALLS, SOUTH DAKOTA',
'KFSE' : 'FOSSTON, MINNESOTA',
'KFSI' : 'LAWTON, OKLAHOMA',
'KFSM' : 'FORT SMITH, ARKANSAS',
'KFST' : 'FORT STOCKTON, TEXAS',
'KFSW' : 'FORT MADISON, IOWA',
'KFTG' : 'DENVER, COLORADO',
'KFTK' : 'FORT KNOX, KENTUCKY',
'KFTW' : 'FORT WORTH, TEXAS',
'KFTY' : 'ATLANTA, GEORGIA',
'KFUL' : 'FULLERTON, CALIFORNIA',
'KFVE' : 'FRENCHVILLE, MAINE',
'KFVX' : 'FARMVILLE, VIRGINIA',
'KFWA' : 'FORT WAYNE, INDIANA',
'KFWC' : 'FAIRFIELD, ILLINOIS',
'KFWN' : 'SUSSEX, NEW JERSEY',
'KFWQ' : 'MONONGAHELA, PENNSYLVANIA',
'KFWS' : 'FORT WORTH, TEXAS',
'KFXE' : 'FORT LAUDERDALE, FLORIDA',
'KFYJ' : 'WEST POINT, VIRGINIA',
'KFYV' : 'FAYETTEVILLE, ARKANSAS',
'KFZY' : 'FULTON, NEW YORK',
'KGAD' : 'GADSDEN, ALABAMA',
'KGAF' : 'GRAFTON, NORTH DAKOTA',
'KGAG' : 'GAGE, OKLAHOMA',
'KGAO' : 'GALLIANO, LOUISIANA',
'KGBD' : 'GREAT BEND, KANSAS',
'KGBG' : 'GALESBURG, ILLINOIS',
'KGBN' : 'GILA BEND, ARIZONA',
'KGCC' : 'GILLETTE, WYOMING',
'KGCK' : 'GARDEN CITY, KANSAS',
'KGCM' : 'CLAREMORE, OKLAHOMA',
'KGCN' : 'GRAND CANYON, ARIZONA',
'KGDB' : 'GRANITE FALLS, MINNESOTA',
'KGDJ' : 'GRANBURY, TEXAS',
'KGDP' : 'GUADALUPE PASS, TEXAS',
'KGDV' : 'GLENDIVE, MONTANA',
'KGED' : 'GEORGETOWN, DELAWARE',
'KGEG' : 'SPOKANE, WASHINGTON',
'KGEU' : 'GLENDALE, ARIZONA',
'KGEV' : 'JEFFERSON, NORTH CAROLINA',
'KGEY' : 'GREYBULL, WYOMING',
'KGEZ' : 'SHELBYVILLE, INDIANA',
'KGFA' : 'GREAT FALLS, MONTANA',
'KGFK' : 'GRAND FORKS, NORTH DAKOTA',
'KGFL' : 'GLENS FALLS, NEW YORK',
'KGGG' : 'LONGVIEW, TEXAS',
'KGGW' : 'GLASGOW, MONTANA',
'KGHW' : 'GLENWOOD, MINNESOTA',
'KGID' : 'GRAND ISLAND, NEBRASKA',
'KGIF' : 'WINTER HAVEN, FLORIDA',
'KGJT' : 'GRAND JUNCTION, COLORADO',
'KGKJ' : 'MEADVILLE, PENNSYLVANIA',
'KGKY' : 'ARLINGTON, TEXAS',
'KGLD' : 'GOODLAND, KANSAS',
'KGLE' : 'GAINESVILLE, TEXAS',
'KGLH' : 'GREENVILLE, MISSISSIPPI',
'KGLR' : 'GAYLORD, MICHIGAN',
'KGLS' : 'GALVESTON, TEXAS',
'KGLW' : 'GLASGOW, KENTUCKY',
'KGMJ' : 'GROVE, OKLAHOMA',
'KGMU' : 'GREENVILLE, SOUTH CAROLINA',
'KGNA' : 'GRAND MARAIS, MINNESOTA',
'KGNC' : 'SEMINOLE, TEXAS',
'KGNR' : 'GREENVILLE, MAINE',
'KGNT' : 'GRANTS, NEW MEXICO',
'KGNV' : 'GAINESVILLE, FLORIDA',
'KGOK' : 'GUTHRIE, OKLAHOMA',
'KGON' : 'GROTON, CONNECTICUT',
'KGOP' : 'GATESVILLE, TEXAS',
'KGOV' : 'GRAYLING, MICHIGAN',
'KGPI' : 'KALISPELL, MONTANA',
'KGPM' : 'GRAND PRAIRIE, TEXAS',
'KGPT' : 'GULFPORT, MISSISSIPPI',
'KGPZ' : 'GRAND RAPIDS, MINNESOTA',
'KGRB' : 'GREEN BAY, WISCONSIN',
'KGRD' : 'GREENWOOD, SOUTH CAROLINA',
'KGRF' : 'TACOMA, WASHINGTON',
'KGRI' : 'GRAND ISLAND, NEBRASKA',
'KGRK' : 'FORT HOOD, TEXAS',
'KGRN' : 'GORDON, NEBRASKA',
'KGRR' : 'GRAND RAPIDS, MICHIGAN',
'KGSB' : 'GOLDSBORO, NORTH CAROLINA',
'KGSH' : 'GOSHEN, INDIANA',
'KGSO' : 'GREENSBORO, NORTH CAROLINA',
'KGSP' : 'GREER, SOUTH CAROLINA',
'KGTB' : 'FORT DRUM, NEW YORK',
'KGTF' : 'GREAT FALLS, MONTANA',
'KGTR' : 'COLUMBUS, MISSISSIPPI',
'KGTU' : 'GEORGETOWN, TEXAS',
'KGUC' : 'GUNNISON, COLORADO',
'KGUP' : 'GALLUP, NEW MEXICO',
'KGUS' : 'BUNKER HILL, INDIANA',
'KGUY' : 'GUYMON, OKLAHOMA',
'KGVL' : 'GAINESVILLE, GEORGIA',
'KGVT' : 'GREENVILLE, TEXAS',
'KGVW' : 'KANSAS CITY, MISSOURI',
'KGWO' : 'GREENWOOD, MISSISSIPPI',
'KGWR' : 'GWINNER, NORTH DAKOTA',
'KGWW' : 'GOLDSBORO, NORTH CAROLINA',
'KGXY' : 'GREELEY, COLORADO',
'KGYB' : 'GIDDINGS, TEXAS',
'KGYI' : 'SHERMAN, TEXAS',
'KGYL' : 'GLENCOE, MINNESOTA',
'KGYR' : 'GOODYEAR, ARIZONA',
'KGYY' : 'GARY, INDIANA',
'KGZH' : 'EVERGREEN, ALABAMA',
'KH92' : 'HOMINY, OKLAHOMA',
'KHAO' : 'HAMILTON, OHIO',
'KHAT' : 'CAPE HATTERAS, NORTH CAROLINA',
'KHBG' : 'HATTIESBURG, MISSISSIPPI',
'KHBI' : 'ASHEBORO, NORTH CAROLINA',
'KHBR' : 'HOBART, OKLAHOMA',
'KHBV' : 'HEBBRONVILLE, TEXAS',
'KHCD' : 'HUTCHINSON, MINNESOTA',
'KHCO' : 'HALLOCK, MINNESOTA',
'KHDC' : 'HAMMOND, LOUISIANA',
'KHDE' : 'HOLDREGE, NEBRASKA',
'KHDN' : 'HAYDEN, COLORADO',
'KHDO' : 'HONDO, TEXAS',
'KHEF' : 'WASHINGTON, DISTRICT OF COLUMBIA',
'KHEI' : 'HETTINGER, NORTH DAKOTA',
'KHEY' : 'OZARK, ALABAMA',
'KHEZ' : 'NATCHEZ, MISSISSIPPI',
'KHFD' : 'HARTFORD, CONNECTICUT',
'KHFF' : 'HOFFMAN, NORTH CAROLINA',
'KHGR' : 'HAGERSTOWN, MARYLAND',
'KHGX' : 'HOUSTON, TEXAS',
'KHHF' : 'CANADIAN, TEXAS',
'KHHR' : 'HAWTHORNE, CALIFORNIA',
'KHIB' : 'HIBBING, MINNESOTA',
'KHIE' : 'WHITEFIELD, NEW HAMPSHIRE',
'KHIF' : 'OGDEN, UTAH',
'KHIO' : 'PORTLAND, OREGON',
'KHJH' : 'HEBRON, NEBRASKA',
'KHJO' : 'HANFORD, CALIFORNIA',
'KHKA' : 'BLYTHEVILLE, ARKANSAS',
'KHKS' : 'JACKSON, MISSISSIPPI',
'KHKY' : 'HICKORY, NORTH CAROLINA',
'KHLC' : 'HILL CITY, KANSAS',
'KHLG' : 'WHEELING, WEST VIRGINIA',
'KHLN' : 'HELENA, MONTANA',
'KHLR' : 'KILLEEN, TEXAS',
'KHLX' : 'GALAX HILLSVILLE, VIRGINIA',
'KHMN' : 'ALAMOGORDO, NEW MEXICO',
'KHMZ' : 'BEDFORD, PENNSYLVANIA',
'KHNB' : 'HUNTINGBURG, INDIANA',
'KHND' : 'LAS VEGAS, NEVADA',
'KHNR' : 'HARLAN, IOWA',
'KHNZ' : 'OXFORD, NORTH CAROLINA',
'KHOB' : 'HOBBS, NEW MEXICO',
'KHON' : 'HURON, SOUTH DAKOTA',
'KHOP' : 'FORT CAMPBELL, KENTUCKY',
'KHOT' : 'HOT SPRINGS, ARKANSAS',
'KHOU' : 'HOUSTON, TEXAS',
'KHPN' : 'WHITE PLAINS, NEW YORK',
'KHQM' : 'HOQUIAM, WASHINGTON',
'KHQU' : 'THOMSON, GEORGIA',
'KHQZ' : 'MESQUITE, TEXAS',
'KHRI' : 'HERMISTON, OREGON',
'KHRJ' : 'ERWIN, NORTH CAROLINA',
'KHRL' : 'HARLINGEN, TEXAS',
'KHRO' : 'HARRISON, ARKANSAS',
'KHRT' : 'MARY ESTHER, FLORIDA',
'KHSA' : 'BAY ST LOUIS, MISSISSIPPI',
'KHSB' : 'HARRISBURG, ILLINOIS',
'KHSE' : 'HATTERAS, NORTH CAROLINA',
'KHSI' : 'HASTINGS, NEBRASKA',
'KHSP' : 'HOT SPRINGS, VIRGINIA',
'KHST' : 'HOMESTEAD, FLORIDA',
'KHSV' : 'HUNTSVILLE, ALABAMA',
'KHTH' : 'HAWTHORNE, NEVADA',
'KHTL' : 'HOUGHTON LAKE, MICHIGAN',
'KHTO' : 'EAST HAMPTON, NEW YORK',
'KHTS' : 'HUNTINGTON, WEST VIRGINIA',
'KHUF' : 'TERRE HAUTE, INDIANA',
'KHUL' : 'HOULTON, MAINE',
'KHUM' : 'HOUMA, LOUISIANA',
'KHUN' : 'HUNTSVILLE, ALABAMA',
'KHUT' : 'HUTCHINSON, KANSAS',
'KHVN' : 'NEW HAVEN, CONNECTICUT',
'KHVR' : 'HAVRE, MONTANA',
'KHWD' : 'HAYWARD, CALIFORNIA',
'KHWO' : 'HOLLYWOOD, FLORIDA',
'KHWV' : 'SHIRLEY, NEW YORK',
'KHXD' : 'HILTON HEAD ISLAND, SOUTH CAROLINA',
'KHYA' : 'HYANNIS, MASSACHUSETTS',
'KHYI' : 'SAN MARCOS, TEXAS',
'KHYR' : 'HAYWARD, WISCONSIN',
'KHYS' : 'HAYS, KANSAS',
'KHYX' : 'SAGINAW, MICHIGAN',
'KHZE' : 'HAZEN, NORTH DAKOTA',
'KHZX' : 'MC GREGOR, MINNESOTA',
'KHZY' : 'ASHTABULA, OHIO',
'KI12' : 'SIDNEY, OHIO',
'KI16' : 'WINDOM, WEST VIRGINIA',
'KI63' : 'MOUNT STERLING, ILLINOIS',
'KIAB' : 'WICHITA, KANSAS',
'KIAD' : 'WASHINGTON, DISTRICT OF COLUMBIA',
'KIAG' : 'NIAGARA FALLS, NEW YORK',
'KIAH' : 'HOUSTON, TEXAS',
'KIBM' : 'KIMBALL, NEBRASKA',
'KICL' : 'CLARINDA, IOWA',
'KICR' : 'WINNER, SOUTH DAKOTA',
'KICT' : 'WICHITA, KANSAS',
'KIDA' : 'IDAHO FALLS, IDAHO',
'KIDI' : 'INDIANA, PENNSYLVANIA',
'KIEN' : 'PINE RIDGE, SOUTH DAKOTA',
'KIER' : 'NATCHITOCHES, LOUISIANA',
'KIFP' : 'BULLHEAD CITY, ARIZONA',
'KIGM' : 'KINGMAN, ARIZONA',
'KIGQ' : 'CHICAGO, ILLINOIS',
'KIGX' : 'CHAPEL HILL, NORTH CAROLINA',
'KIIB' : 'INDEPENDENCE, IOWA',
'KIIY' : 'WASHINGTON, GEORGIA',
'KIJD' : 'WILLIMANTIC, CONNECTICUT',
'KIJX' : 'JACKSONVILLE, ILLINOIS',
'KIKK' : 'KANKAKEE, ILLINOIS',
'KIKV' : 'ANKENY, IOWA',
'KILE' : 'KILLEEN, TEXAS',
'KILG' : 'WILMINGTON, DELAWARE',
'KILM' : 'WILMINGTON, NORTH CAROLINA',
'KILN' : 'WILMINGTON, OHIO',
'KILX' : 'LINCOLN, ILLINOIS',
'KIML' : 'IMPERIAL, NEBRASKA',
'KIMT' : 'IRON MOUNTAIN KINGSFORD, MICHIGAN',
'KIND' : 'INDIANAPOLIS, INDIANA',
'KINJ' : 'HILLSBORO, TEXAS',
'KINK' : 'WINK, TEXAS',
'KINL' : 'INTERNATIONAL FALLS, MINNESOTA',
'KINS' : 'INDIAN SPRINGS, NEVADA',
'KINT' : 'WINSTON SALEM, NORTH CAROLINA',
'KINW' : 'WINSLOW, ARIZONA',
'KIOW' : 'IOWA CITY, IOWA',
'KIPJ' : 'LINCOLNTON, NORTH CAROLINA',
'KIPL' : 'IMPERIAL, CALIFORNIA',
'KIPT' : 'WILLIAMSPORT, PENNSYLVANIA',
'KIRK' : 'KIRKSVILLE, MISSOURI',
'KIRS' : 'STURGIS, MICHIGAN',
'KISM' : 'ORLANDO, FLORIDA',
'KISN' : 'WILLISTON, NORTH DAKOTA',
'KISO' : 'KINSTON, NORTH CAROLINA',
'KISP' : 'NEW YORK, NEW YORK',
'KISQ' : 'MANISTIQUE, MICHIGAN',
'KISW' : 'WISCONSIN RAPIDS, WISCONSIN',
'KITH' : 'ITHACA, NEW YORK',
'KITR' : 'BURLINGTON, COLORADO',
'KIWA' : 'PHOENIX, ARIZONA',
'KIWD' : 'IRONWOOD, MICHIGAN',
'KIWI' : 'WISCASSET, MAINE',
'KIWS' : 'HOUSTON, TEXAS',
'KIXD' : 'OLATHE, KANSAS',
'KIYK' : 'INYOKERN, CALIFORNIA',
'KIZA' : 'SANTA YNEZ, CALIFORNIA',
'KIZG' : 'FRYEBURG, MAINE',
'KJAC' : 'JACKSON, WYOMING',
'KJAN' : 'JACKSON, MISSISSIPPI',
'KJAS' : 'JASPER, TEXAS',
'KJAX' : 'JACKSONVILLE, FLORIDA',
'KJBR' : 'JONESBORO, ARKANSAS',
'KJCT' : 'JUNCTION, TEXAS',
'KJDD' : 'MINEOLA, TEXAS',
'KJDN' : 'JORDAN, MONTANA',
'KJEF' : 'JEFFERSON CITY, MISSOURI',
'KJER' : 'JEROME, IDAHO',
'KJES' : 'JESUP, GEORGIA',
'KJFK' : 'NEW YORK, NEW YORK',
'KJGG' : 'WILLIAMSBURG, VIRGINIA',
'KJHW' : 'JAMESTOWN, NEW YORK',
'KJKJ' : 'MOORHEAD, MINNESOTA',
'KJKL' : 'JACKSON, KENTUCKY',
'KJLN' : 'JOPLIN, MISSOURI',
'KJMR' : 'MORA, MINNESOTA',
'KJMS' : 'JAMESTOWN, NORTH DAKOTA',
'KJNX' : 'SMITHFIELD, NORTH CAROLINA',
'KJOT' : 'JOLIET, ILLINOIS',
'KJQF' : 'CONCORD, NORTH CAROLINA',
'KJSO' : 'JACKSONVILLE, TEXAS',
'KJST' : 'JOHNSTOWN, PENNSYLVANIA',
'KJSV' : 'SALLISAW, OKLAHOMA',
'KJVL' : 'JANESVILLE, WISCONSIN',
'KJWG' : 'WATONGA, OKLAHOMA',
'KJWY' : 'MIDLOTHIAN, TEXAS',
'KJXI' : 'GILMER, TEXAS',
'KJXN' : 'JACKSON, MICHIGAN',
'KJYG' : 'ST JAMES, MINNESOTA',
'KJYL' : 'SYLVANIA, GEORGIA',
'KJYM' : 'HILLSDALE, MICHIGAN',
'KJYO' : 'LEESBURG, VIRGINIA',
'KJYR' : 'YORK, NEBRASKA',
'KKLS' : 'KELSO, WASHINGTON',
'KL49' : 'ROSEMONT, CALIFORNIA',
'KLAA' : 'LAMAR, COLORADO',
'KLAF' : 'LAFAYETTE, INDIANA',
'KLAL' : 'LAKELAND, FLORIDA',
'KLAM' : 'LOS ALAMOS, NEW MEXICO',
'KLAN' : 'LANSING, MICHIGAN',
'KLAR' : 'LARAMIE, WYOMING',
'KLAS' : 'LAS VEGAS, NEVADA',
'KLAW' : 'LAWTON, OKLAHOMA',
'KLAX' : 'LOS ANGELES, CALIFORNIA',
'KLBB' : 'LUBBOCK, TEXAS',
'KLBE' : 'LATROBE, PENNSYLVANIA',
'KLBF' : 'NORTH PLATTE, NEBRASKA',
'KLBL' : 'LIBERAL, KANSAS',
'KLBR' : 'CLARKSVILLE, TEXAS',
'KLBT' : 'LUMBERTON, NORTH CAROLINA',
'KLBX' : 'ANGLETON, TEXAS',
'KLCG' : 'WAYNE, NEBRASKA',
'KLCH' : 'LAKE CHARLES, LOUISIANA',
'KLCI' : 'LACONIA, NEW HAMPSHIRE',
'KLCK' : 'COLUMBUS, OHIO',
'KLDM' : 'LUDINGTON, MICHIGAN',
'KLEB' : 'LEBANON, NEW HAMPSHIRE',
'KLEE' : 'LEESBURG, FLORIDA',
'KLEW' : 'AUBURN, MAINE',
'KLEX' : 'LEXINGTON, KENTUCKY',
'KLFI' : 'HAMPTON, VIRGINIA',
'KLFK' : 'LUFKIN, TEXAS',
'KLFT' : 'LAFAYETTE, LOUISIANA',
'KLGA' : 'NEW YORK, NEW YORK',
'KLGB' : 'LONG BEACH, CALIFORNIA',
'KLGC' : 'LAGRANGE, GEORGIA',
'KLGD' : 'LA GRANDE, OREGON',
'KLGU' : 'LOGAN, UTAH',
'KLHB' : 'HEARNE, TEXAS',
'KLHQ' : 'LANCASTER, OHIO',
'KLHW' : 'FORT STEWART (HINESVILLE), GEORGIA',
'KLHX' : 'LA JUNTA, COLORADO',
'KLHZ' : 'LOUISBURG, NORTH CAROLINA',
'KLIC' : 'LIMON, COLORADO',
'KLIT' : 'LITTLE ROCK, ARKANSAS',
'KLIX' : 'SLIDELL, LOUISIANA',
'KLJF' : 'LITCHFIELD, MINNESOTA',
'KLKU' : 'LOUISA, VIRGINIA',
'KLKV' : 'LAKEVIEW, OREGON',
'KLLJ' : 'CHALLIS, IDAHO',
'KLLQ' : 'MONTICELLO, ARKANSAS',
'KLMT' : 'KLAMATH FALLS, OREGON',
'KLNC' : 'LANCASTER, TEXAS',
'KLND' : 'LANDER, WYOMING',
'KLNK' : 'LINCOLN, NEBRASKA',
'KLNL' : 'LAND O\' LAKES, WISCONSIN',
'KLNN' : 'WILLOUGHBY, OHIO',
'KLNP' : 'WISE, VIRGINIA',
'KLNR' : 'LONE ROCK, WISCONSIN',
'KLNS' : 'LANCASTER, PENNSYLVANIA',
'KLOL' : 'LOVELOCK, NEVADA',
'KLOM' : 'PHILADELPHIA, PENNSYLVANIA',
'KLOR' : 'FORT RUCKER, ALABAMA',
'KLOT' : 'CHICAGO, ILLINOIS',
'KLOU' : 'LOUISVILLE, KENTUCKY',
'KLOZ' : 'LONDON, KENTUCKY',
'KLPC' : 'LOMPOC, CALIFORNIA',
'KLPR' : 'LORAIN, OHIO',
'KLRD' : 'LAREDO, TEXAS',
'KLRF' : 'LITTLE ROCK, ARKANSAS',
'KLRJ' : 'LE MARS, IOWA',
'KLRU' : 'LAS CRUCES, NEW MEXICO',
'KLSE' : 'LA CROSSE, WISCONSIN',
'KLSF' : 'COLUMBUS, GEORGIA',
'KLSV' : 'LAS VEGAS, NEVADA',
'KLSX' : 'ST CHARLES, MISSOURI',
'KLTS' : 'ALTUS, OKLAHOMA',
'KLUD' : 'DECATUR, TEXAS',
'KLUF' : 'PHOENIX, ARIZONA',
'KLUK' : 'CINCINNATI, OHIO',
'KLUM' : 'MENOMONIE, WISCONSIN',
'KLVJ' : 'HOUSTON, TEXAS',
'KLVK' : 'LIVERMORE, CALIFORNIA',
'KLVM' : 'LIVINGSTON, MONTANA',
'KLVN' : 'MINNEAPOLIS, MINNESOTA',
'KLVS' : 'LAS VEGAS, NEW MEXICO',
'KLWA' : 'SOUTH HAVEN, MICHIGAN',
'KLWB' : 'LEWISBURG, WEST VIRGINIA',
'KLWC' : 'LAWRENCE, KANSAS',
'KLWD' : 'LAMONI, IOWA',
'KLWM' : 'LAWRENCE, MASSACHUSETTS',
'KLWS' : 'LEWISTON, IDAHO',
'KLWT' : 'LEWISTOWN, MONTANA',
'KLWV' : 'LAWRENCEVILLE, ILLINOIS',
'KLWX' : 'STERLING, MARYLAND',
'KLXL' : 'LITTLE FALLS, MINNESOTA',
'KLXN' : 'LEXINGTON, NEBRASKA',
'KLXT' : 'LEE\'S SUMMIT, MISSOURI',
'KLXV' : 'LEADVILLE, COLORADO',
'KLYH' : 'LYNCHBURG, VIRGINIA',
'KLYV' : 'LUVERNE, MINNESOTA',
'KLZK' : 'NORTH LITTLE ROCK, ARKANSAS',
'KLZU' : 'LAWRENCEVILLE, GEORGIA',
'KLZZ' : 'LAMPASAS, TEXAS',
'KM19' : 'NEWPORT, ARKANSAS',
'KM30' : 'METROPOLIS, ILLINOIS',
'KM82' : 'HUNTSVILLE, ALABAMA',
'KM97' : 'TUNICA, MISSISSIPPI',
'KMAE' : 'MADERA, CALIFORNIA',
'KMAF' : 'MIDLAND, TEXAS',
'KMAI' : 'MARIANNA, FLORIDA',
'KMAN' : 'NAMPA, IDAHO',
'KMBG' : 'MOBRIDGE, SOUTH DAKOTA',
'KMBL' : 'MANISTEE, MICHIGAN',
'KMBS' : 'SAGINAW, MICHIGAN',
'KMCB' : 'MC COMB, MISSISSIPPI',
'KMCC' : 'SACRAMENTO, CALIFORNIA',
'KMCD' : 'MACKINAC ISLAND, MICHIGAN',
'KMCE' : 'MERCED, CALIFORNIA',
'KMCF' : 'TAMPA, FLORIDA',
'KMCI' : 'KANSAS CITY, MISSOURI',
'KMCK' : 'MC COOK, NEBRASKA',
'KMCN' : 'MACON, GEORGIA',
'KMCO' : 'ORLANDO, FLORIDA',
'KMCW' : 'MASON CITY, IOWA',
'KMDD' : 'MIDLAND, TEXAS',
'KMDH' : 'CARBONDALE, ILLINOIS',
'KMDQ' : 'HUNTSVILLE, ALABAMA',
'KMDT' : 'HARRISBURG, PENNSYLVANIA',
'KMDW' : 'CHICAGO, ILLINOIS',
'KMDZ' : 'MEDFORD, WISCONSIN',
'KMEB' : 'MAXTON, NORTH CAROLINA',
'KMEG' : 'MEMPHIS, TENNESSEE',
'KMEH' : 'MEACHAM, OREGON',
'KMEI' : 'MERIDIAN, MISSISSIPPI',
'KMEM' : 'MEMPHIS, TENNESSEE',
'KMER' : 'ATWATER, CALIFORNIA',
'KMEZ' : 'MENA, ARKANSAS',
'KMFD' : 'MANSFIELD, OHIO',
'KMFE' : 'MC ALLEN, TEXAS',
'KMFI' : 'MARSHFIELD, WISCONSIN',
'KMFR' : 'MEDFORD, OREGON',
'KMFV' : 'MELFA, VIRGINIA',
'KMGE' : 'MARIETTA, GEORGIA',
'KMGG' : 'MAPLE LAKE, MINNESOTA',
'KMGJ' : 'MONTGOMERY, NEW YORK',
'KMGM' : 'MONTGOMERY, ALABAMA',
'KMGN' : 'HARBOR SPRINGS, MICHIGAN',
'KMGR' : 'MOULTRIE, GEORGIA',
'KMGW' : 'MORGANTOWN, WEST VIRGINIA',
'KMGY' : 'DAYTON, OHIO',
'KMHE' : 'MITCHELL, SOUTH DAKOTA',
'KMHK' : 'MANHATTAN, KANSAS',
'KMHN' : 'MULLEN, NEBRASKA',
'KMHR' : 'SACRAMENTO, CALIFORNIA',
'KMHS' : 'MOUNT SHASTA, CALIFORNIA',
'KMHT' : 'MANCHESTER, NEW HAMPSHIRE',
'KMHV' : 'MOJAVE, CALIFORNIA',
'KMHX' : 'MOREHEAD CITY, NORTH CAROLINA',
'KMIA' : 'MIAMI, FLORIDA',
'KMIB' : 'MINOT, NORTH DAKOTA',
'KMIC' : 'MINNEAPOLIS, MINNESOTA',
'KMIE' : 'MUNCIE, INDIANA',
'KMIV' : 'MILLVILLE, NEW JERSEY',
'KMIW' : 'MARSHALLTOWN, IOWA',
'KMJQ' : 'JACKSON, MINNESOTA',
'KMKC' : 'KANSAS CITY, MISSOURI',
'KMKE' : 'MILWAUKEE, WISCONSIN',
'KMKG' : 'MUSKEGON, MICHIGAN',
'KMKJ' : 'MARION, VIRGINIA',
'KMKL' : 'JACKSON, TENNESSEE',
'KMKN' : 'COMANCHE, TEXAS',
'KMKO' : 'MUSKOGEE, OKLAHOMA',
'KMKT' : 'MANKATO, MINNESOTA',
'KMKX' : 'MILWAUKEE, WISCONSIN',
'KMLB' : 'MELBOURNE, FLORIDA',
'KMLC' : 'MC ALESTER, OKLAHOMA',
'KMLD' : 'MALAD CITY, IDAHO',
'KMLE' : 'OMAHA, NEBRASKA',
'KMLF' : 'MILFORD, UTAH',
'KMLI' : 'MOLINE, ILLINOIS',
'KMLJ' : 'MILLEDGEVILLE, GEORGIA',
'KMLP' : 'MULLAN PASS, IDAHO',
'KMLS' : 'MILES CITY, MONTANA',
'KMLT' : 'MILLINOCKET, MAINE',
'KMLU' : 'MONROE, LOUISIANA',
'KMMH' : 'MAMMOTH LAKES, CALIFORNIA',
'KMMK' : 'MERIDEN, CONNECTICUT',
'KMML' : 'MARSHALL, MINNESOTA',
'KMMT' : 'EASTOVER, SOUTH CAROLINA',
'KMMU' : 'MORRISTOWN, NEW JERSEY',
'KMMV' : 'MC MINNVILLE, OREGON',
'KMNH' : 'MONUMENT, COLORADO',
'KMNI' : 'MANNING, SOUTH CAROLINA',
'KMNM' : 'MENOMINEE, MICHIGAN',
'KMNN' : 'MARION, OHIO',
'KMOB' : 'MOBILE, ALABAMA',
'KMOD' : 'MODESTO, CALIFORNIA',
'KMOP' : 'MOUNT PLEASANT, MICHIGAN',
'KMOT' : 'MINOT, NORTH DAKOTA',
'KMOX' : 'MORRIS, MINNESOTA',
'KMPO' : 'MOUNT POCONO, PENNSYLVANIA',
'KMPV' : 'BARRE, VERMONT',
'KMPZ' : 'MOUNT PLEASANT, IOWA',
'KMQB' : 'MACOMB, ILLINOIS',
'KMQE' : 'MILTON, MASSACHUSETTS',
'KMQI' : 'MANTEO, NORTH CAROLINA',
'KMQT' : 'MARQUETTE, MICHIGAN',
'KMQY' : 'SMYRNA, TENNESSEE',
'KMRB' : 'MARTINSBURG, WEST VIRGINIA',
'KMRC' : 'COLUMBIA, TENNESSEE',
'KMRF' : 'MARFA, TEXAS',
'KMRH' : 'BEAUFORT, NORTH CAROLINA',
'KMRJ' : 'MINERAL POINT, WISCONSIN',
'KMRN' : 'MORGANTON, NORTH CAROLINA',
'KMRY' : 'MONTEREY, CALIFORNIA',
'KMSL' : 'MUSCLE SHOALS, ALABAMA',
'KMSN' : 'MADISON, WISCONSIN',
'KMSO' : 'MISSOULA, MONTANA',
'KMSP' : 'MINNEAPOLIS, MINNESOTA',
'KMSS' : 'MASSENA, NEW YORK',
'KMSV' : 'MONTICELLO, NEW YORK',
'KMSY' : 'NEW ORLEANS, LOUISIANA',
'KMTC' : 'MOUNT CLEMENS, MICHIGAN',
'KMTH' : 'MARATHON, FLORIDA',
'KMTJ' : 'MONTROSE, COLORADO',
'KMTN' : 'BALTIMORE, MARYLAND',
'KMTO' : 'MATTOON, ILLINOIS',
'KMTP' : 'MONTAUK, NEW YORK',
'KMTV' : 'MARTINSVILLE, VIRGINIA',
'KMTW' : 'MANITOWOC, WISCONSIN',
'KMTX' : 'MISSION, TEXAS',
'KMUI' : 'INDIANTOWN, PENNSYLVANIA',
'KMUO' : 'MOUNTAIN HOME, IDAHO',
'KMUT' : 'MUSCATINE, IOWA',
'KMVE' : 'MONTEVIDEO, MINNESOTA',
'KMVL' : 'MORRISVILLE, VERMONT',
'KMVN' : 'MOUNT VERNON, ILLINOIS',
'KMVY' : 'VINEYARD HAVEN, MASSACHUSETTS',
'KMWA' : 'MARION, ILLINOIS',
'KMWC' : 'MILWAUKEE, WISCONSIN',
'KMWH' : 'MOSES LAKE, WASHINGTON',
'KMWK' : 'MOUNT AIRY, NORTH CAROLINA',
'KMWL' : 'MINERAL WELLS, TEXAS',
'KMWM' : 'WINDOM, MINNESOTA',
'KMWN' : 'MT WASHINGTON, NEW HAMPSHIRE',
'KMWS' : 'MOUNT WILSON, CALIFORNIA',
'KMWT' : 'MOUNT IDA, ARKANSAS',
'KMXF' : 'MONTGOMERY, ALABAMA',
'KMXO' : 'MONTICELLO, IOWA',
'KMYF' : 'SAN DIEGO, CALIFORNIA',
'KMYL' : 'MC CALL, IDAHO',
'KMYP' : 'MONARCH PASS, COLORADO',
'KMYR' : 'MYRTLE BEACH, SOUTH CAROLINA',
'KMYV' : 'MARYSVILLE, CALIFORNIA',
'KMZH' : 'MOOSE LAKE, MINNESOTA',
'KN00' : 'FULTON, NEW YORK',
'KN60' : 'GARRISON, NORTH DAKOTA',
'KNAK' : 'ANNAPOLIS, MARYLAND',
'KNBC' : 'BEAUFORT, SOUTH CAROLINA',
'KNBE' : 'DALLAS, TEXAS',
'KNBG' : 'NEW ORLEANS, LOUISIANA',
'KNBJ' : 'BARIN, ALABAMA',
'KNBT' : 'PINEY ISLAND, NORTH CAROLINA',
'KNCA' : 'JACKSONVILLE, NORTH CAROLINA',
'KNDZ' : 'MILTON, FLORIDA',
'KNEL' : 'LAKEHURST, NEW JERSEY',
'KNEN' : 'JACKSONVILLE, FLORIDA',
'KNEW' : 'NEW ORLEANS, LOUISIANA',
'KNFE' : 'FENTRESS, VIRGINIA',
'KNFG' : 'OCEANSIDE, CALIFORNIA',
'KNFJ' : 'MILTON, FLORIDA',
'KNFL' : 'FALLON, NEVADA',
'KNFW' : 'FORT WORTH, TEXAS',
'KNGP' : 'CORPUS CHRISTI, TEXAS',
'KNGU' : 'NORFOLK, VIRGINIA',
'KNGW' : 'CORPUS CHRISTI, TEXAS',
'KNHK' : 'PATUXENT RIVER, MARYLAND',
'KNHZ' : 'BRUNSWICK, MAINE',
'KNID' : 'INYOKERN, CALIFORNIA',
'KNIP' : 'JACKSONVILLE, FLORIDA',
'KNJK' : 'EL CENTRO, CALIFORNIA',
'KNJM' : 'SWANSBORO, NORTH CAROLINA',
'KNJW' : 'MERIDIAN, MISSISSIPPI',
'KNKT' : 'CHERRY POINT, NORTH CAROLINA',
'KNKX' : 'SAN DIEGO, CALIFORNIA',
'KNLC' : 'LEMOORE, CALIFORNIA',
'KNMM' : 'MERIDIAN, MISSISSIPPI',
'KNOG' : 'ORANGE GROVE, TEXAS',
'KNOW' : 'PORT ANGELES, WASHINGTON',
'KNPA' : 'PENSACOLA, FLORIDA',
'KNQA' : 'MILLINGTON, TENNESSEE',
'KNQI' : 'KINGSVILLE, TEXAS',
'KNQX' : 'KEY WEST, FLORIDA',
'KNRA' : 'COUPEVILLE, WASHINGTON',
'KNRB' : 'MAYPORT, FLORIDA',
'KNRC' : 'CROWS LANDING, CALIFORNIA',
'KNRS' : 'IMPERIAL BEACH, CALIFORNIA',
'KNSE' : 'MILTON, FLORIDA',
'KNSI' : 'SAN NICOLAS ISLAND, CALIFORNIA',
'KNTD' : 'POINT MUGU, CALIFORNIA',
'KNTU' : 'VIRGINIA BEACH, VIRGINIA',
'KNUC' : 'SAN CLEMENTE, CALIFORNIA',
'KNUI' : 'ST INIGOES, MARYLAND',
'KNUQ' : 'MOUNTAIN VIEW, CALIFORNIA',
'KNUW' : 'WHIDBEY ISLAND, WASHINGTON',
'KNXP' : 'TWENTYNINE PALMS, CALIFORNIA',
'KNXX' : 'WILLOW GROVE, PENNSYLVANIA',
'KNYC' : 'NEW YORK CITY, NEW YORK',
'KNYG' : 'QUANTICO, VIRGINIA',
'KNYL' : 'YUMA, ARIZONA',
'KNZY' : 'SAN DIEGO, CALIFORNIA',
'KO18' : 'HANFORD, CALIFORNIA',
'KO54' : 'WEAVERVILLE, CALIFORNIA',
'KO64' : 'FORT BRAGG, CALIFORNIA',
'KOAJ' : 'JACKSONVILLE, NORTH CAROLINA',
'KOAK' : 'OAKLAND, CALIFORNIA',
'KOAX' : 'OMAHA, NEBRASKA',
'KOBE' : 'OKEECHOBEE, FLORIDA',
'KOCF' : 'OCALA, FLORIDA',
'KOCH' : 'NACOGDOCHES, TEXAS',
'KOCW' : 'WASHINGTON, NORTH CAROLINA',
'KODO' : 'ODESSA, TEXAS',
'KODX' : 'ORD, NEBRASKA',
'KOEB' : 'COLDWATER, MICHIGAN',
'KOEO' : 'OSCEOLA, WISCONSIN',
'KOFF' : 'OMAHA, NEBRASKA',
'KOFK' : 'NORFOLK, NEBRASKA',
'KOFP' : 'RICHMOND, VIRGINIA',
'KOGA' : 'OGALLALA, NEBRASKA',
'KOGB' : 'ORANGEBURG, SOUTH CAROLINA',
'KOGD' : 'OGDEN, UTAH',
'KOGS' : 'OGDENSBURG, NEW YORK',
'KOJA' : 'WEATHERFORD, OKLAHOMA',
'KOJC' : 'OLATHE, KANSAS',
'KOKB' : 'OCEANSIDE, CALIFORNIA',
'KOKC' : 'OKLAHOMA CITY, OKLAHOMA',
'KOKH' : 'OAK HARBOR, WASHINGTON',
'KOKK' : 'KOKOMO, INDIANA',
'KOKM' : 'OKMULGEE, OKLAHOMA',
'KOKV' : 'WINCHESTER, VIRGINIA',
'KOKX' : 'NEW YORK CITY, NEW YORK',
'KOLD' : 'OLD TOWN, MAINE',
'KOLE' : 'OLEAN, NEW YORK',
'KOLF' : 'WOLF POINT, MONTANA',
'KOLM' : 'OLYMPIA, WASHINGTON',
'KOLS' : 'NOGALES, ARIZONA',
'KOLU' : 'COLUMBUS, NEBRASKA',
'KOLV' : 'OLIVE BRANCH, MISSISSIPPI',
'KOLY' : 'OLNEY-NOBLE, ILLINOIS',
'KOLZ' : 'OELWEIN, IOWA',
'KOMA' : 'OMAHA, NEBRASKA',
'KOMH' : 'ORANGE, VIRGINIA',
'KOMK' : 'OMAK, WASHINGTON',
'KOMN' : 'ORMOND BEACH, FLORIDA',
'KONA' : 'WINONA, MINNESOTA',
'KONL' : 'O\'NEILL, NEBRASKA',
'KONM' : 'SOCORRO, NEW MEXICO',
'KONO' : 'ONTARIO, OREGON',
'KONP' : 'NEWPORT, OREGON',
'KONT' : 'ONTARIO, CALIFORNIA',
'KONX' : 'CURRITUCK, NORTH CAROLINA',
'KONZ' : 'DETROIT, MICHIGAN',
'KOOA' : 'OSKALOOSA, IOWA',
'KOPF' : 'MIAMI, FLORIDA',
'KOPN' : 'THOMASTON, GEORGIA',
'KOQT' : 'OAK RIDGE, TENNESSEE',
'KOQU' : 'NORTH KINGSTOWN, RHODE ISLAND',
'KORB' : 'ORR, MINNESOTA',
'KORC' : 'ORANGE CITY, IOWA',
'KORD' : 'CHICAGO, ILLINOIS',
'KORE' : 'ORANGE, MASSACHUSETTS',
'KORF' : 'NORFOLK, VIRGINIA',
'KORG' : 'ORANGE, TEXAS',
'KORH' : 'WORCESTER, MASSACHUSETTS',
'KORL' : 'ORLANDO, FLORIDA',
'KORS' : 'EASTSOUND, WASHINGTON',
'KOSA' : 'MOUNT PLEASANT, TEXAS',
'KOSC' : 'OSCODA, MICHIGAN',
'KOSH' : 'OSHKOSH, WISCONSIN',
'KOSU' : 'COLUMBUS, OHIO',
'KOTG' : 'WORTHINGTON, MINNESOTA',
'KOTH' : 'NORTH BEND, OREGON',
'KOTM' : 'OTTUMWA, IOWA',
'KOUN' : 'NORMAN, OKLAHOMA',
'KOVE' : 'OROVILLE, CALIFORNIA',
'KOVL' : 'OLIVIA, MINNESOTA',
'KOVS' : 'BOSCOBEL, WISCONSIN',
'KOWA' : 'OWATONNA, MINNESOTA',
'KOWB' : 'OWENSBORO, KENTUCKY',
'KOWD' : 'NORWOOD, MASSACHUSETTS',
'KOXB' : 'OCEAN CITY, MARYLAND',
'KOXC' : 'OXFORD, CONNECTICUT',
'KOXR' : 'OXNARD, CALIFORNIA',
'KOXV' : 'KNOXVILLE, IOWA',
'KOZR' : 'FORT RUCKER, ALABAMA',
'KOZW' : 'HOWELL, MICHIGAN',
'KP01' : 'AJO, ARIZONA',
'KP28' : 'MEDICINE LODGE, KANSAS',
'KP53' : 'MUNISING LAKESHORE, MICHIGAN',
'KP58' : 'PORT HOPE, MICHIGAN',
'KP59' : 'COPPER HARBOR, MICHIGAN',
'KP60' : 'YELLOWSTONE, WYOMING',
'KP68' : 'EUREKA, NEVADA',
'KP69' : 'LOWELL, IDAHO',
'KP92' : 'SALT POINT, LOUISIANA',
'KPAE' : 'EVERETT, WASHINGTON',
'KPAH' : 'PADUCAH, KENTUCKY',
'KPAM' : 'PANAMA CITY, FLORIDA',
'KPAO' : 'PALO ALTO, CALIFORNIA',
'KPBF' : 'PINE BLUFF, ARKANSAS',
'KPBG' : 'PLATTSBURGH, NEW YORK',
'KPBH' : 'PHILLIPS, WISCONSIN',
'KPBI' : 'WEST PALM BEACH, FLORIDA',
'KPCM' : 'PLANT CITY, FLORIDA',
'KPCZ' : 'WAUPACA, WISCONSIN',
'KPDC' : 'PRAIRIE DU CHIEN, WISCONSIN',
'KPDK' : 'ATLANTA, GEORGIA',
'KPDT' : 'PENDLETON, OREGON',
'KPDX' : 'PORTLAND, OREGON',
'KPEA' : 'PELLA, IOWA',
'KPEO' : 'PENN YAN, NEW YORK',
'KPEQ' : 'PECOS, TEXAS',
'KPEX' : 'PAYNESVILLE, MINNESOTA',
'KPFC' : 'PACIFIC CITY, OREGON',
'KPFN' : 'PANAMA CITY, FLORIDA',
'KPGA' : 'PAGE, ARIZONA',
'KPGD' : 'PUNTA GORDA, FLORIDA',
'KPGV' : 'GREENVILLE, NORTH CAROLINA',
'KPHD' : 'NEW PHILADELPHIA, OHIO',
'KPHF' : 'NEWPORT NEWS, VIRGINIA',
'KPHL' : 'PHILADELPHIA, PENNSYLVANIA',
'KPHN' : 'PORT HURON, MICHIGAN',
'KPHP' : 'PHILIP, SOUTH DAKOTA',
'KPHX' : 'PHOENIX, ARIZONA',
'KPIA' : 'PEORIA, ILLINOIS',
'KPIB' : 'HATTIESBURG, MISSISSIPPI',
'KPIE' : 'ST PETERSBURG-CLEARWATER, FLORIDA',
'KPIH' : 'POCATELLO, IDAHO',
'KPIL' : 'PORT ISABEL, TEXAS',
'KPIR' : 'PIERRE, SOUTH DAKOTA',
'KPIT' : 'PITTSBURGH, PENNSYLVANIA',
'KPKB' : 'PARKERSBURG, WEST VIRGINIA',
'KPKD' : 'PARK RAPIDS, MINNESOTA',
'KPKF' : 'PARK FALLS, WISCONSIN',
'KPKV' : 'PORT LAVACA, TEXAS',
'KPLB' : 'PLATTSBURGH, NEW YORK',
'KPLN' : 'PELLSTON, MICHIGAN',
'KPMD' : 'PALMDALE, CALIFORNIA',
'KPMP' : 'POMPANO BEACH, FLORIDA',
'KPMV' : 'PLATTSMOUTH, NEBRASKA',
'KPNA' : 'PINEDALE, WYOMING',
'KPNC' : 'PONCA CITY, OKLAHOMA',
'KPNE' : 'PHILADELPHIA, PENNSYLVANIA',
'KPNM' : 'PRINCETON, MINNESOTA',
'KPNS' : 'PENSACOLA, FLORIDA',
'KPNT' : 'PONTIAC, ILLINOIS',
'KPOB' : 'FORT BRAGG, NORTH CAROLINA',
'KPOC' : 'LA VERNE, CALIFORNIA',
'KPOE' : 'FORT POLK, LOUISIANA',
'KPOF' : 'POPLAR BLUFF, MISSOURI',
'KPOU' : 'POUGHKEEPSIE, NEW YORK',
'KPPA' : 'PAMPA, TEXAS',
'KPPF' : 'PARSONS, KANSAS',
'KPPQ' : 'PITTSFIELD, ILLINOIS',
'KPQI' : 'PRESQUE ISLE, MAINE',
'KPQL' : 'PASCAGOULA, MISSISSIPPI',
'KPQN' : 'PIPESTONE, MINNESOTA',
'KPRB' : 'PASO ROBLES, CALIFORNIA',
'KPRC' : 'PRESCOTT, ARIZONA',
'KPRG' : 'PARIS, ILLINOIS',
'KPRN' : 'GREENVILLE, ALABAMA',
'KPRX' : 'PARIS, TEXAS',
'KPSC' : 'PASCO, WASHINGTON',
'KPSF' : 'PITTSFIELD, MASSACHUSETTS',
'KPSK' : 'DUBLIN, VIRGINIA',
'KPSM' : 'PORTSMOUTH, NEW HAMPSHIRE',
'KPSN' : 'PALESTINE, TEXAS',
'KPSP' : 'PALM SPRINGS, CALIFORNIA',
'KPSX' : 'PALACIOS, TEXAS',
'KPTB' : 'PETERSBURG, VIRGINIA',
'KPTK' : 'PONTIAC, MICHIGAN',
'KPTN' : 'PATTERSON, LOUISIANA',
'KPTT' : 'PRATT, KANSAS',
'KPTV' : 'PORTERVILLE, CALIFORNIA',
'KPTW' : 'POTTSTOWN, PENNSYLVANIA',
'KPUB' : 'PUEBLO, COLORADO',
'KPUC' : 'PRICE, UTAH',
'KPUW' : 'PULLMAN, WASHINGTON',
'KPVC' : 'PROVINCETOWN, MASSACHUSETTS',
'KPVD' : 'PROVIDENCE, RHODE ISLAND',
'KPVJ' : 'PAULS VALLEY, OKLAHOMA',
'KPVU' : 'PROVO, UTAH',
'KPVW' : 'PLAINVIEW, TEXAS',
'KPWA' : 'OKLAHOMA CITY, OKLAHOMA',
'KPWC' : 'PINE RIVER, MINNESOTA',
'KPWG' : 'WACO, TEXAS',
'KPWK' : 'CHICAGO, ILLINOIS',
'KPWM' : 'PORTLAND, MAINE',
'KPWT' : 'BREMERTON, WASHINGTON',
'KPYM' : 'PLYMOUTH, MASSACHUSETTS',
'KPYX' : 'PERRYTON, TEXAS',
'KPZQ' : 'ROGERS CITY, MICHIGAN',
'KQCA' : 'GRANITE PEAK, UTAH',
'KQCB' : 'GRANITE PEAK, UTAH',
'KRAC' : 'RACINE, WISCONSIN',
'KRAD' : 'WARROAD, MINNESOTA',
'KRAL' : 'RIVERSIDE, CALIFORNIA',
'KRAP' : 'RAPID CITY, SOUTH DAKOTA',
'KRAS' : 'PORT ARANSAS, TEXAS',
'KRBD' : 'DALLAS, TEXAS',
'KRBG' : 'ROSEBURG, OREGON',
'KRBL' : 'RED BLUFF, CALIFORNIA',
'KRBO' : 'ROBSTOWN, TEXAS',
'KRCA' : 'RAPID CITY, SOUTH DAKOTA',
'KRCX' : 'LADYSMITH, WISCONSIN',
'KRDD' : 'REDDING, CALIFORNIA',
'KRDG' : 'READING, PENNSYLVANIA',
'KRDK' : 'RED OAK, IOWA',
'KRDM' : 'REDMOND, OREGON',
'KRDR' : 'GRAND FORKS, NORTH DAKOTA',
'KRDU' : 'RALEIGH-DURHAM, NORTH CAROLINA',
'KRED' : 'RED LODGE, MONTANA',
'KREE' : 'JANESVILLE, WISCONSIN',
'KREO' : 'ROME, OREGON',
'KRFD' : 'CHICAGO, ILLINOIS',
'KRGK' : 'RED WING, MINNESOTA',
'KRHI' : 'RHINELANDER, WISCONSIN',
'KRHP' : 'ANDREWS, NORTH CAROLINA',
'KRHV' : 'SAN JOSE, CALIFORNIA',
'KRIC' : 'RICHMOND, VIRGINIA',
'KRIL' : 'RIFLE, COLORADO',
'KRIV' : 'RIVERSIDE, CALIFORNIA',
'KRIW' : 'RIVERTON, WYOMING',
'KRKD' : 'ROCKLAND, MAINE',
'KRKP' : 'ROCKPORT, TEXAS',
'KRKR' : 'POTEAU, OKLAHOMA',
'KRKS' : 'ROCK SPRINGS, WYOMING',
'KRLX' : 'CHARLESTON, WEST VIRGINIA',
'KRME' : 'ROME, NEW YORK',
'KRMG' : 'ROME, GEORGIA',
'KRMN' : 'STAFFORD, VIRGINIA',
'KRMY' : 'MARSHALL, MICHIGAN',
'KRND' : 'SAN ANTONIO, TEXAS',
'KRNH' : 'NEW RICHMOND, WISCONSIN',
'KRNM' : 'RAMONA, CALIFORNIA',
'KRNO' : 'RENO, NEVADA',
'KRNP' : 'OWOSSO, MICHIGAN',
'KRNT' : 'RENTON, WASHINGTON',
'KROA' : 'ROANOKE, VIRGINIA',
'KROC' : 'ROCHESTER, NEW YORK',
'KROG' : 'ROGERS, ARKANSAS',
'KROS' : 'RUSH CITY, MINNESOTA',
'KROW' : 'ROSWELL, NEW MEXICO',
'KROX' : 'ROSEAU, MINNESOTA',
'KRPD' : 'RICE LAKE, WISCONSIN',
'KRPH' : 'GRAHAM, TEXAS',
'KRPJ' : 'ROCHELLE, ILLINOIS',
'KRQB' : 'BIG RAPIDS, MICHIGAN',
'KRQE' : 'WINDOW ROCK, ARIZONA',
'KRQO' : 'EL RENO, OKLAHOMA',
'KRRL' : 'MERRILL, WISCONSIN',
'KRRT' : 'WARROAD, MINNESOTA',
'KRSL' : 'RUSSELL, KANSAS',
'KRSN' : 'RUSTON, LOUISIANA',
'KRST' : 'ROCHESTER, MINNESOTA',
'KRSV' : 'ROBINSON, ILLINOIS',
'KRSW' : 'FORT MYERS, FLORIDA',
'KRTN' : 'RATON, NEW MEXICO',
'KRUE' : 'RUSSELLVILLE, ARKANSAS',
'KRUG' : 'RUGBY, NORTH DAKOTA',
'KRUQ' : 'SALISBURY, NORTH CAROLINA',
'KRUT' : 'RUTLAND, VERMONT',
'KRVL' : 'REEDSVILLE, PENNSYLVANIA',
'KRVS' : 'TULSA, OKLAHOMA',
'KRWF' : 'REDWOOD FALLS, MINNESOTA',
'KRWI' : 'ROCKY MOUNT, NORTH CAROLINA',
'KRWL' : 'RAWLINS, WYOMING',
'KRWV' : 'CALDWELL, TEXAS',
'KRXE' : 'REXBURG, IDAHO',
'KRYV' : 'WATERTOWN, WISCONSIN',
'KRYY' : 'ATLANTA, GEORGIA',
'KRZN' : 'SIREN, WISCONSIN',
'KRZZ' : 'ROANOKE RAPIDS, NORTH CAROLINA',
'KS21' : 'SUNRIVER, OREGON',
'KS25' : 'WATFORD CITY, NORTH DAKOTA',
'KS32' : 'COOPERSTOWN, NORTH DAKOTA',
'KS47' : 'HATHAWAY MEAD, OREGON',
'KS80' : 'GRANGEVILLE, IDAHO',
'KS88' : 'ARLINGTON, WASHINGTON',
'KSAC' : 'SACRAMENTO, CALIFORNIA',
'KSAD' : 'SAFFORD, ARIZONA',
'KSAF' : 'SANTA FE, NEW MEXICO',
'KSAN' : 'SAN DIEGO, CALIFORNIA',
'KSAR' : 'SPARTA, ILLINOIS',
'KSAT' : 'SAN ANTONIO, TEXAS',
'KSAV' : 'SAVANNAH, GEORGIA',
'KSAW' : 'MARQUETTE, MICHIGAN',
'KSAZ' : 'STAPLES, MINNESOTA',
'KSBA' : 'SANTA BARBARA, CALIFORNIA',
'KSBD' : 'SAN BERNARDINO, CALIFORNIA',
'KSBM' : 'SHEBOYGAN, WISCONSIN',
'KSBN' : 'SOUTH BEND, INDIANA',
'KSBP' : 'SAN LUIS OBISPO, CALIFORNIA',
'KSBS' : 'STEAMBOAT SPRINGS, COLORADO',
'KSBY' : 'SALISBURY, MARYLAND',
'KSCH' : 'SCHENECTADY, NEW YORK',
'KSCK' : 'STOCKTON, CALIFORNIA',
'KSDA' : 'SHENANDOAH, IOWA',
'KSDB' : 'SANDBERG, CALIFORNIA',
'KSDF' : 'LOUISVILLE, KENTUCKY',
'KSDL' : 'SCOTTSDALE, ARIZONA',
'KSDM' : 'SAN DIEGO, CALIFORNIA',
'KSDY' : 'SIDNEY, MONTANA',
'KSEA' : 'SEATTLE, WASHINGTON',
'KSEE' : 'SAN DIEGO, CALIFORNIA',
'KSEG' : 'SELINSGROVE, PENNSYLVANIA',
'KSEM' : 'SELMA, ALABAMA',
'KSEP' : 'STEPHENVILLE, TEXAS',
'KSET' : 'ST CHARLES, MISSOURI',
'KSEW' : 'SEATTLE, WASHINGTON',
'KSEZ' : 'SEDONA, ARIZONA',
'KSFB' : 'ORLANDO, FLORIDA',
'KSFF' : 'SPOKANE, WASHINGTON',
'KSFM' : 'SANFORD, MAINE',
'KSFO' : 'SAN FRANCISCO, CALIFORNIA',
'KSFQ' : 'SUFFOLK, VIRGINIA',
'KSFY' : 'SAVANNA, ILLINOIS',
'KSFZ' : 'PAWTUCKET, RHODE ISLAND',
'KSGF' : 'SPRINGFIELD, MISSOURI',
'KSGH' : 'SPRINGFIELD, OHIO',
'KSGJ' : 'ST AUGUSTINE, FLORIDA',
'KSGR' : 'HOUSTON, TEXAS',
'KSGS' : 'SOUTH ST PAUL, MINNESOTA',
'KSGT' : 'STUTTGART, ARKANSAS',
'KSGU' : 'ST GEORGE, UTAH',
'KSHD' : 'STAUNTON, VIRGINIA',
'KSHL' : 'SHELDON, IOWA',
'KSHN' : 'SHELTON, WASHINGTON',
'KSHR' : 'SHERIDAN, WYOMING',
'KSHV' : 'SHREVEPORT, LOUISIANA',
'KSIY' : 'MONTAGUE, CALIFORNIA',
'KSJC' : 'SAN JOSE, CALIFORNIA',
'KSJN' : 'ST JOHNS, ARIZONA',
'KSJT' : 'SAN ANGELO, TEXAS',
'KSJX' : 'BEAVER ISLAND, MICHIGAN',
'KSKA' : 'SPOKANE, WASHINGTON',
'KSKF' : 'SAN ANTONIO, TEXAS',
'KSKX' : 'TAOS, NEW MEXICO',
'KSLB' : 'STORM LAKE, IOWA',
'KSLC' : 'SALT LAKE CITY, UTAH',
'KSLE' : 'SALEM, OREGON',
'KSLG' : 'SILOAM SPRINGS, ARKANSAS',
'KSLH' : 'CHEBOYGAN, MICHIGAN',
'KSLI' : 'LOS ALAMITOS, CALIFORNIA',
'KSLK' : 'SARANAC LAKE, NEW YORK',
'KSLN' : 'SALINA, KANSAS',
'KSLO' : 'SALEM, ILLINOIS',
'KSLR' : 'SULPHUR SPRINGS, TEXAS',
'KSME' : 'SOMERSET, KENTUCKY',
'KSMF' : 'SACRAMENTO, CALIFORNIA',
'KSMN' : 'SALMON, IDAHO',
'KSMO' : 'SANTA MONICA, CALIFORNIA',
'KSMQ' : 'SOMERVILLE, NEW JERSEY',
'KSMX' : 'SANTA MARIA, CALIFORNIA',
'KSNA' : 'SANTA ANA, CALIFORNIA',
'KSNC' : 'CHESTER, CONNECTICUT',
'KSNK' : 'SNYDER, TEXAS',
'KSNL' : 'SHAWNEE, OKLAHOMA',
'KSNS' : 'SALINAS, CALIFORNIA',
'KSNT' : 'STANLEY, IDAHO',
'KSNY' : 'SIDNEY, NEBRASKA',
'KSOA' : 'SONORA, TEXAS',
'KSOP' : 'PINEHURST, NORTH CAROLINA',
'KSOW' : 'SHOW LOW, ARIZONA',
'KSPA' : 'SPARTANBURG, SOUTH CAROLINA',
'KSPB' : 'SCAPPOOSE, OREGON',
'KSPD' : 'SPRINGFIELD, COLORADO',
'KSPF' : 'SPEARFISH, SOUTH DAKOTA',
'KSPG' : 'ST PETERSBURG, FLORIDA',
'KSPI' : 'SPRINGFIELD, ILLINOIS',
'KSPL' : 'SOUTH PADRE ISLAND, TEXAS',
'KSPS' : 'WICHITA FALLS, TEXAS',
'KSPW' : 'SPENCER, IOWA',
'KSQI' : 'STERLING, ILLINOIS',
'KSQL' : 'SAN CARLOS, CALIFORNIA',
'KSRC' : 'SEARCY, ARKANSAS',
'KSRE' : 'SEMINOLE, OKLAHOMA',
'KSRQ' : 'SARASOTA, FLORIDA',
'KSRR' : 'RUIDOSO, NEW MEXICO',
'KSSC' : 'SUMTER, SOUTH CAROLINA',
'KSSF' : 'SAN ANTONIO, TEXAS',
'KSSI' : 'BRUNSWICK, GEORGIA',
'KSTC' : 'ST CLOUD, MINNESOTA',
'KSTE' : 'STEVENS POINT, WISCONSIN',
'KSTJ' : 'ST JOSEPH, MISSOURI',
'KSTL' : 'ST LOUIS, MISSOURI',
'KSTP' : 'ST PAUL, MINNESOTA',
'KSTS' : 'SANTA ROSA, CALIFORNIA',
'KSTT' : 'CHARLOTTE AMALIE, VIRGIN ISLANDS',
'KSTX' : 'CHRISTIANSTED, VIRGIN ISLANDS',
'KSUA' : 'STUART, FLORIDA',
'KSUE' : 'STURGEON BAY, WISCONSIN',
'KSUN' : 'HAILEY, IDAHO',
'KSUS' : 'ST LOUIS, MISSOURI',
'KSUT' : 'OAK ISLAND, NORTH CAROLINA',
'KSUU' : 'FAIRFIELD, CALIFORNIA',
'KSUW' : 'SUPERIOR, WISCONSIN',
'KSUX' : 'SIOUX CITY, IOWA',
'KSVC' : 'SILVER CITY, NEW MEXICO',
'KSVE' : 'SUSANVILLE, CALIFORNIA',
'KSVH' : 'STATESVILLE, NORTH CAROLINA',
'KSVN' : 'SAVANNAH, GEORGIA',
'KSWD' : 'SEWARD, ALASKA',
'KSWF' : 'NEWBURGH, NEW YORK',
'KSWO' : 'STILLWATER, OKLAHOMA',
'KSWW' : 'SWEETWATER, TEXAS',
'KSXT' : 'SEXTON SUMMIT, OREGON',
'KSYN' : 'STANTON, MINNESOTA',
'KSYR' : 'SYRACUSE, NEW YORK',
'KSZL' : 'KNOB NOSTER, MISSOURI',
'KSZN' : 'SANTA CRUZ, CALIFORNIA',
'KSZT' : 'SANDPOINT, IDAHO',
'KT65' : 'WESLACO, TEXAS',
'KT82' : 'FREDERICKSBURG, TEXAS',
'KTAD' : 'TRINIDAD, COLORADO',
'KTAN' : 'TAUNTON, MASSACHUSETTS',
'KTAZ' : 'TAYLORVILLE, ILLINOIS',
'KTBN' : 'FORT LEONARD WOOD, MISSOURI',
'KTBR' : 'STATESBORO, GEORGIA',
'KTBW' : 'TAMPA, FLORIDA',
'KTCC' : 'TUCUMCARI, NEW MEXICO',
'KTCL' : 'TUSCALOOSA, ALABAMA',
'KTCM' : 'SEATTLE, WASHINGTON',
'KTCS' : 'TRUTH OR CONSEQUENCES, NEW MEXICO',
'KTDF' : 'ROXBORO, NORTH CAROLINA',
'KTDO' : 'TOLEDO, WASHINGTON',
'KTDZ' : 'TOLEDO, OHIO',
'KTEB' : 'TETERBORO, NEW JERSEY',
'KTEW' : 'MASON, MICHIGAN',
'KTEX' : 'TELLURIDE, COLORADO',
'KTFX' : 'GREAT FALLS, MONTANA',
'KTHV' : 'YORK, PENNSYLVANIA',
'KTIF' : 'THEDFORD, NEBRASKA',
'KTIK' : 'OKLAHOMA CITY, OKLAHOMA',
'KTIP' : 'RANTOUL, ILLINOIS',
'KTIW' : 'TACOMA, WASHINGTON',
'KTIX' : 'TITUSVILLE, FLORIDA',
'KTKC' : 'TRACY, MINNESOTA',
'KTKI' : 'DALLAS, TEXAS',
'KTKV' : 'TOMAHAWK, WISCONSIN',
'KTLH' : 'TALLAHASSEE, FLORIDA',
'KTMB' : 'MIAMI, FLORIDA',
'KTNB' : 'BOONE, NORTH CAROLINA',
'KTNU' : 'NEWTON, IOWA',
'KTNX' : 'MELLAN, NEVADA',
'KTOA' : 'TORRANCE, CALIFORNIA',
'KTOB' : 'DODGE CENTER, MINNESOTA',
'KTOI' : 'TROY, ALABAMA',
'KTOL' : 'TOLEDO, OHIO',
'KTOP' : 'TOPEKA, KANSAS',
'KTOR' : 'TORRINGTON, WYOMING',
'KTPA' : 'TAMPA, FLORIDA',
'KTPF' : 'TAMPA, FLORIDA',
'KTPH' : 'TONOPAH, NEVADA',
'KTPL' : 'TEMPLE, TEXAS',
'KTQE' : 'TEKAMAH, NEBRASKA',
'KTQH' : 'TAHLEQUAH, OKLAHOMA',
'KTRI' : 'BRISTOL, TENNESSEE',
'KTRK' : 'TRUCKEE, CALIFORNIA',
'KTRL' : 'TERRELL, TEXAS',
'KTRM' : 'PALM SPRINGS, CALIFORNIA',
'KTTA' : 'SANFORD, NORTH CAROLINA',
'KTTD' : 'PORTLAND, OREGON',
'KTTF' : 'MONROE, MICHIGAN',
'KTTN' : 'TRENTON, NEW JERSEY',
'KTTS' : 'CAPE KENNEDY, FLORIDA',
'KTUL' : 'TULSA, OKLAHOMA',
'KTUP' : 'TUPELO, MISSISSIPPI',
'KTUS' : 'TUCSON, ARIZONA',
'KTVC' : 'TRAVERSE CITY, MICHIGAN',
'KTVF' : 'THIEF RIVER FALLS, MINNESOTA',
'KTVI' : 'THOMASVILLE, GEORGIA',
'KTVL' : 'SOUTH LAKE TAHOE, CALIFORNIA',
'KTVR' : 'TALLULAH, LOUISIANA',
'KTWF' : 'TWIN FALLS, IDAHO',
'KTWM' : 'TWO HARBORS, MINNESOTA',
'KTXK' : 'TEXARKANA, ARKANSAS',
'KTYR' : 'TYLER, TEXAS',
'KTYS' : 'KNOXVILLE, TENNESSEE',
'KTZR' : 'COLUMBUS, OHIO',
'KU16' : 'HILL RANGE, UTAH',
'KU24' : 'DELTA, UTAH',
'KU42' : 'SALT LAKE CITY, UTAH',
'KU67' : 'ROOSEVELT, UTAH',
'KU78' : 'SODA SPRINGS, IDAHO',
'KUAO' : 'AURORA, OREGON',
'KUCA' : 'UTICA, NEW YORK',
'KUCP' : 'NEW CASTLE, PENNSYLVANIA',
'KUDG' : 'DARLINGTON, SOUTH CAROLINA',
'KUES' : 'WAUKESHA, WISCONSIN',
'KUGN' : 'CHICAGO, ILLINOIS',
'KUIL' : 'QUILLAYUTE, WASHINGTON',
'KUIN' : 'QUINCY, ILLINOIS',
'KUKF' : 'NORTH WILKESBORO, NORTH CAROLINA',
'KUKI' : 'UKIAH, CALIFORNIA',
'KUKL' : 'BURLINGTON, KANSAS',
'KUKT' : 'QUAKERTOWN, PENNSYLVANIA',
'KULM' : 'NEW ULM, MINNESOTA',
'KUNO' : 'WEST PLAINS, MISSOURI',
'KUNU' : 'JUNEAU, WISCONSIN',
'KUNV' : 'STATE COLLEGE, PENNSYLVANIA',
'KUOX' : 'OXFORD, MISSISSIPPI',
'KUTA' : 'TUNICA, MISSISSIPPI',
'KUTS' : 'HUNTSVILLE, TEXAS',
'KUUU' : 'NEWPORT, RHODE ISLAND',
'KUVA' : 'UVALDE, TEXAS',
'KUZA' : 'ROCK HILL, SOUTH CAROLINA',
'KVAD' : 'VALDOSTA, GEORGIA',
'KVAY' : 'MOUNT HOLLY, NEW JERSEY',
'KVBG' : 'VANDENBERG, CALIFORNIA',
'KVBT' : 'BENTONVILLE, ARKANSAS',
'KVCB' : 'VACAVILLE, CALIFORNIA',
'KVCT' : 'VICTORIA, TEXAS',
'KVCV' : 'VICTORVILLE, CALIFORNIA',
'KVDF' : 'TAMPA, FLORIDA',
'KVDI' : 'VIDALIA, GEORGIA',
'KVDW' : 'VEDAUWOO, WYOMING',
'KVEL' : 'VERNAL, UTAH',
'KVGT' : 'LAS VEGAS, NEVADA',
'KVIH' : 'ROLLA, MISSOURI',
'KVIS' : 'VISALIA, CALIFORNIA',
'KVJI' : 'ABINGDON, VIRGINIA',
'KVKS' : 'VICKSBURG, MISSISSIPPI',
'KVLD' : 'VALDOSTA, GEORGIA',
'KVLL' : 'TROY, MICHIGAN',
'KVNY' : 'VAN NUYS, CALIFORNIA',
'KVOK' : 'VOLK, WISCONSIN',
'KVPC' : 'CARTERSVILLE, GEORGIA',
'KVPS' : 'VALPARAISO, FLORIDA',
'KVPZ' : 'VALPARAISO, INDIANA',
'KVQQ' : 'JACKSONVILLE, FLORIDA',
'KVRB' : 'VERO BEACH, FLORIDA',
'KVSF' : 'SPRINGFIELD, VERMONT',
'KVTA' : 'NEWARK, OHIO',
'KVTI' : 'VINTON, IOWA',
'KVTN' : 'VALENTINE, NEBRASKA',
'KVTP' : 'LA VETA PASS, COLORADO',
'KVUJ' : 'ALBEMARLE, NORTH CAROLINA',
'KVUO' : 'VANCOUVER, WASHINGTON',
'KVVG' : 'LADY LAKE, FLORIDA',
'KVVV' : 'ORTONVILLE, MINNESOTA',
'KVWU' : 'WASKISH, MINNESOTA',
'KVYS' : 'PERU, ILLINOIS',
'KW22' : 'BUCKHANNON, WEST VIRGINIA',
'KW39' : 'ROCHE HARBOR SPB, WASHINGTON',
'KW45' : 'LURAY, VIRGINIA',
'KW51' : 'CAPE CHARLES, VIRGINIA',
'KW63' : 'CLARKSVILLE, VIRGINIA',
'KW99' : 'TOWN HILL, WEST VIRGINIA',
'KWAL' : 'CHINCOTEAGUE, VIRGINIA',
'KWDG' : 'ENID, OKLAHOMA',
'KWDR' : 'WINDER, GEORGIA',
'KWHP' : 'LOS ANGELES, CALIFORNIA',
'KWJF' : 'LANCASTER, CALIFORNIA',
'KWLD' : 'WINFIELD, KANSAS',
'KWMC' : 'WINNEMUCCA, NEVADA',
'KWRB' : 'WARNER ROBINS, GEORGIA',
'KWRI' : 'WRIGHTSTOWN, NEW JERSEY',
'KWRL' : 'WORLAND, WYOMING',
'KWST' : 'WESTERLY, RHODE ISLAND',
'KWVI' : 'WATSONVILLE, CALIFORNIA',
'KWVL' : 'WATERVILLE, MAINE',
'KWWD' : 'WILDWOOD, NEW JERSEY',
'KWWR' : 'WOODWARD, OKLAHOMA',
'KWYS' : 'WEST YELLOWSTONE, MONTANA',
'KX21' : 'TITUSVILLE, FLORIDA',
'KXBP' : 'BRIDGEPORT, TEXAS',
'KXMR' : 'COCOA BEACH, FLORIDA',
'KXNA' : 'FAYETTEVILLE, ARKANSAS',
'KXVG' : 'LONGVILLE, MINNESOTA',
'KY19' : 'MANDAN, NORTH DAKOTA',
'KY50' : 'WAUTOMA, WISCONSIN',
'KY51' : 'VIROQUA, WISCONSIN',
'KY63' : 'ELBOW LAKE, MINNESOTA',
'KYIP' : 'DETROIT, MICHIGAN',
'KYKM' : 'YAKIMA, WASHINGTON',
'KYKN' : 'YANKTON, SOUTH DAKOTA',
'KYNG' : 'YOUNGSTOWN, OHIO',
'KYUM' : 'YUMA, ARIZONA',
'KZAB' : 'ALBUQUERQUE, NEW MEXICO',
'KZLC' : 'SALT LAKE CITY, UTAH',
'KZSE' : 'AUBURN, WASHINGTON',
'KZZV' : 'ZANESVILLE, OHIO',
'PAAK' : 'ATKA, ALASKA',
'PAAP' : 'PORT ALEXANDER, ALASKA',
'PAAQ' : 'PALMER, ALASKA',
'PABA' : 'BARTER ISLAND LRRS, ALASKA',
'PABE' : 'BETHEL, ALASKA',
'PABG' : 'BELUGA, ALASKA',
'PABI' : 'DELTA JUNCTION, ALASKA',
'PABL' : 'BUCKLAND, ALASKA',
'PABN' : 'NABESNA, ALASKA',
'PABR' : 'BARROW, ALASKA',
'PABT' : 'BETTLES, ALASKA',
'PABV' : 'BIRCHWOOD, ALASKA',
'PACD' : 'COLD BAY, ALASKA',
'PACE' : 'CENTRAL, ALASKA',
'PACL' : 'CLEAR, ALASKA',
'PACP' : 'CAPE SAINT ELIAS, ALASKA',
'PACR' : 'CIRCLE, ALASKA',
'PACS' : 'CAPE SARICHEF, ALASKA',
'PACV' : 'CORDOVA, ALASKA',
'PACY' : 'YAKATAGA, ALASKA',
'PACZ' : 'CAPE ROMANZOFF, ALASKA',
'PADE' : 'DEERING, ALASKA',
'PADK' : 'ADAK ISLAND, ALASKA',
'PADL' : 'DILLINGHAM, ALASKA',
'PADQ' : 'KODIAK, ALASKA',
'PADT' : 'SLANA, ALASKA',
'PADU' : 'UNALASKA, ALASKA',
'PAEC' : 'CHULITNA, ALASKA',
'PAED' : 'ANCHORAGE, ALASKA',
'PAEG' : 'EAGLE, ALASKA',
'PAEH' : 'CAPE NEWENHAM, ALASKA',
'PAEI' : 'FAIRBANKS, ALASKA',
'PAEL' : 'ELFIN COVE, ALASKA',
'PAEM' : 'EMMONAK, ALASKA',
'PAEN' : 'KENAI, ALASKA',
'PAER' : 'MERRILL PASS WEST, ALASKA',
'PAFA' : 'FAIRBANKS, ALASKA',
'PAFB' : 'FAIRBANKS, ALASKA',
'PAFE' : 'KAKE, ALASKA',
'PAFK' : 'FAREWELL LAKE, ALASKA',
'PAFM' : 'AMBLER, ALASKA',
'PAFR' : 'FORT RICHARDSON, ALASKA',
'PAFW' : 'FAREWELL, ALASKA',
'PAGA' : 'GALENA, ALASKA',
'PAGB' : 'GALBRAITH LAKE, ALASKA',
'PAGK' : 'GULKANA, ALASKA',
'PAGL' : 'GOLOVIN, ALASKA',
'PAGM' : 'GAMBELL, ALASKA',
'PAGN' : 'ANGOON, ALASKA',
'PAGS' : 'GUSTAVUS, ALASKA',
'PAGT' : 'NIGHTMUTE, ALASKA',
'PAGY' : 'SKAGWAY, ALASKA',
'PAHL' : 'HUSLIA, ALASKA',
'PAHN' : 'HAINES, ALASKA',
'PAHO' : 'HOMER, ALASKA',
'PAHP' : 'HOOPER BAY, ALASKA',
'PAHV' : 'HEALY, ALASKA',
'PAHY' : 'HYDABURG, ALASKA',
'PAHZ' : 'HAYES RIVER, ALASKA',
'PAII' : 'EGEGIK, ALASKA',
'PAIK' : 'KIANA, ALASKA',
'PAIL' : 'ILIAMNA, ALASKA',
'PAIM' : 'UTOPIA CREEK, ALASKA',
'PAIN' : 'MCKINLEY PARK, ALASKA',
'PAIW' : 'WALES, ALASKA',
'PAIZ' : 'LAZY MTN, ALASKA',
'PAJB' : 'BIORKA ISLAND, ALASKA',
'PAJC' : 'CHIGNIK, ALASKA',
'PAJN' : 'JUNEAU, ALASKA',
'PAJO' : 'CAPE HINCHINBROOK, ALASKA',
'PAJV' : 'SUTTON, ALASKA',
'PAKI' : 'KIPNUK, ALASKA',
'PAKK' : 'KOYUK, ALASKA',
'PAKN' : 'KING SALMON, ALASKA',
'PAKO' : 'NIKOLSKI, ALASKA',
'PAKP' : 'ANAKTUVUK PASS, ALASKA',
'PAKT' : 'KETCHIKAN, ALASKA',
'PAKU' : 'KUPARUK, ALASKA',
'PAKV' : 'KALTAG, ALASKA',
'PAKW' : 'KLAWOCK, ALASKA',
'PALH' : 'ANCHORAGE, ALASKA',
'PALJ' : 'PORT ALSWORTH, ALASKA',
'PALK' : 'SNOWSHOE LAKE, ALASKA',
'PALP' : 'DEADHORSE, ALASKA',
'PALR' : 'CHANDALAR LAKE, ALASKA',
'PALU' : 'CAPE LISBURNE, ALASKA',
'PALV' : 'BIG RIVER LAKE, ALASKA',
'PAMC' : 'MCGRATH, ALASKA',
'PAMD' : 'MIDDLETON ISLAND, ALASKA',
'PAMH' : 'MINCHUMINA, ALASKA',
'PAML' : 'MANLEY HOT SPRINGS, ALASKA',
'PAMM' : 'METLAKATLA, ALASKA',
'PAMR' : 'ANCHORAGE, ALASKA',
'PAMX' : 'MCCARTHY, ALASKA',
'PAMY' : 'MEKORYUK, ALASKA',
'PANC' : 'ANCHORAGE, ALASKA',
'PANI' : 'ANIAK, ALASKA',
'PANN' : 'NENANA, ALASKA',
'PANR' : 'FUNTER BAY, ALASKA',
'PANT' : 'ANNETTE ISLAND, ALASKA',
'PANV' : 'ANVIK, ALASKA',
'PAOH' : 'HOONAH, ALASKA',
'PAOM' : 'NOME, ALASKA',
'PAOR' : 'NORTHWAY, ALASKA',
'PAOT' : 'KOTZEBUE, ALASKA',
'PAPB' : 'ST GEORGE, ALASKA',
'PAPC' : 'PORT CLARENCE, ALASKA',
'PAPG' : 'PETERSBURG, ALASKA',
'PAPH' : 'PORT HEIDEN, ALASKA',
'PAPM' : 'PLATINUM, ALASKA',
'PAPO' : 'POINT HOPE, ALASKA',
'PAPR' : 'PROSPECT CREEK, ALASKA',
'PAPT' : 'PUNTILLA, ALASKA',
'PAQT' : 'NUIQSUT, ALASKA',
'PARC' : 'ARCTIC VILLAGE, ALASKA',
'PARD' : 'RED DOG MINE, ALASKA',
'PARL' : 'CENTRAL, ALASKA',
'PASA' : 'SAVOONGA, ALASKA',
'PASC' : 'DEADHORSE, ALASKA',
'PASD' : 'SAND POINT, ALASKA',
'PASH' : 'SHISHMAREF, ALASKA',
'PASI' : 'SITKA, ALASKA',
'PASK' : 'SELAWIK, ALASKA',
'PASL' : 'SLEETMUTE, ALASKA',
'PASM' : 'ST MARY\'S, ALASKA',
'PASN' : 'ST PAUL ISLAND, ALASKA',
'PASO' : 'SELDOVIA, ALASKA',
'PASP' : 'SHEEP MOUNTAIN, ALASKA',
'PASV' : 'SPARREVOHN, ALASKA',
'PASW' : 'SKWENTNA, ALASKA',
'PASX' : 'SOLDOTNA, ALASKA',
'PASY' : 'SHEMYA, ALASKA',
'PATA' : 'TANANA, ALASKA',
'PATC' : 'TIN CITY, ALASKA',
'PATE' : 'TELLER, ALASKA',
'PATG' : 'TOGIAK VILLAGE, ALASKA',
'PATJ' : 'TOK, ALASKA',
'PATK' : 'TALKEETNA, ALASKA',
'PATL' : 'TATALINA, ALASKA',
'PATO' : 'WHITTIER, ALASKA',
'PATW' : 'CANTWELL, ALASKA',
'PAUM' : 'UMIAT, ALASKA',
'PAUN' : 'UNALAKLEET, ALASKA',
'PAUO' : 'WILLOW, ALASKA',
'PAVD' : 'VALDEZ, ALASKA',
'PAVL' : 'KIVALINA, ALASKA',
'PAVW' : 'VALDEZ, ALASKA',
'PAWD' : 'SEWARD, ALASKA',
'PAWG' : 'WRANGELL, ALASKA',
'PAWI' : 'WAINWRIGHT, ALASKA',
'PAWN' : 'NOATAK, ALASKA',
'PAWR' : 'WHITTIER, ALASKA',
'PAWS' : 'WASILLA, ALASKA',
'PAXK' : 'PAXSON, ALASKA',
'PAYA' : 'YAKUTAT, ALASKA',
'PAZK' : 'EUREKA, ALASKA',
'PFYU' : 'FORT YUKON, ALASKA',
'PGRO' : 'ROTA ISLAND, NORTHERN MARIANA ISLANDS',
'PGSN' : 'SAIPAN ISLAND, NORTHERN MARIANA ISLANDS',
'PGUM' : 'GUAM, GUAM',
'PGWT' : 'TINIAN ISLAND, NORTHERN MARIANA ISLANDS',
'PHBK' : 'KEKAHA, HAWAII',
'PHFO' : 'HONOLULU, HAWAII',
'PHHI' : 'WAHIAWA, HAWAII',
'PHHN' : 'HANA, HAWAII',
'PHIK' : 'HONOLULU, HAWAII',
'PHJH' : 'LAHAINA, HAWAII',
'PHJR' : 'KAPOLEI, HAWAII',
'PHKO' : 'KAILUA, HAWAII',
'PHLI' : 'LIHUE, HAWAII',
'PHMK' : 'KAUNAKAKAI, HAWAII',
'PHMO' : 'MOLOKAI, HAWAII',
'PHMU' : 'KAMUELA, HAWAII',
'PHNG' : 'KANEOHE, HAWAII',
'PHNL' : 'HONOLULU, HAWAII',
'PHNY' : 'LANAI CITY, HAWAII',
'PHOG' : 'KAHULUI, HAWAII',
'PHSF' : 'POHAKULOA, HAWAII',
'PHTO' : 'HILO, HAWAII',
'PHWH' : 'SOUTH KONA, HAWAII',
'PMDY' : 'MIDWAY ATOLL, MIDWAY ATOLL',
'POLI' : 'OLIKTOK POINT, ALASKA',
'PPIZ' : 'POINT LAY, ALASKA',
}
|
cuppa-joe/dsame
|
defs.py
|
Python
|
isc
| 274,490
|
[
"COLUMBUS",
"Dalton",
"Elk",
"MOOSE"
] |
783be82240923c562eabcc559ca8c1e2778fd73451903e387cd6a96bc0a89421
|
# http://hacktheuniverse.github.io/star-api/
from bowshock.helpers import dispatch_http_get
def stars():
'''
    Return the list of all stars as JSON.
'''
base_url = "http://star-api.herokuapp.com/api/v1/stars"
return dispatch_http_get(base_url)
def search_star(star):
'''
    It is also possible to query the stars by label; here is an example of querying for the star labeled "Sun":
http://star-api.herokuapp.com/api/v1/stars/Sun
'''
base_url = "http://star-api.herokuapp.com/api/v1/stars/"
if not isinstance(star, str):
raise ValueError("The star arg you provided is not the type of str")
else:
base_url += star
return dispatch_http_get(base_url)
def exoplanets():
'''
    Return all exoplanets as JSON.
'''
base_url = "http://star-api.herokuapp.com/api/v1/exo_planets"
return dispatch_http_get(base_url)
def search_exoplanet(exoplanet):
'''
    It is also possible to query the exoplanets by label; here is an example of querying for the exoplanet labeled "11 Com":
http://star-api.herokuapp.com/api/v1/exo_planets/11 Com
'''
base_url = "http://star-api.herokuapp.com/api/v1/exo_planets/"
if not isinstance(exoplanet, str):
        raise ValueError(
            "The exoplanet argument must be a str")
else:
base_url += exoplanet
return dispatch_http_get(base_url)
def local_group_of_galaxies():
'''
    Return the local group of galaxies as JSON.
'''
base_url = "http://star-api.herokuapp.com/api/v1/local_groups"
return dispatch_http_get(base_url)
def search_local_galaxies(galaxy):
'''
    It is also possible to query the local galaxies by label; here is an example of querying for the local galaxy labeled "IC 10":
http://star-api.herokuapp.com/api/v1/local_groups/IC 10
'''
base_url = "http://star-api.herokuapp.com/api/v1/local_groups/"
if not isinstance(galaxy, str):
raise ValueError("The galaxy arg you provided is not the type of str")
else:
base_url += galaxy
return dispatch_http_get(base_url)
def star_clusters():
'''
    Return all open star clusters as JSON.
'''
base_url = "http://star-api.herokuapp.com/api/v1/open_cluster"
return dispatch_http_get(base_url)
def search_star_cluster(cluster):
'''
    It is also possible to query the star clusters by label; here is an example of querying for the star cluster labeled "Berkeley 59":
http://star-api.herokuapp.com/api/v1/open_cluster/Berkeley 59
'''
base_url = "http://star-api.herokuapp.com/api/v1/open_cluster/"
if not isinstance(cluster, str):
raise ValueError("The cluster arg you provided is not the type of str")
else:
base_url += cluster
return dispatch_http_get(base_url)
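# --------------------------------------------------------------------------
# Usage sketch (not part of the original module): how these helpers might be
# called. It assumes the star-api service is reachable and that
# dispatch_http_get returns the decoded response.
#
#     sun = search_star("Sun")                  # single star by label
#     com11 = search_exoplanet("11 Com")        # single exoplanet by label
#     all_stars = stars()                       # full star list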
|
emirozer/bowshock
|
bowshock/star.py
|
Python
|
gpl-2.0
| 2,844
|
[
"Galaxy"
] |
f21aed1a3a80885377679c25999bbd1d28a0deffb06bd32a0664a3e39acddb43
|
# ***************************************************************************
# * Copyright (c) 2018 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "Mesh FEM unit tests"
__author__ = "Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
import unittest
from os.path import join
import FreeCAD
import Fem
from . import support_utils as testtools
from .support_utils import fcc_print
class TestMeshCommon(unittest.TestCase):
fcc_print("import TestMeshCommon")
# ********************************************************************************************
def setUp(
self
):
# setUp is executed before every test
# new document
self.document = FreeCAD.newDocument(self.__class__.__name__)
# ********************************************************************************************
def tearDown(
self
):
# tearDown is executed after every test
FreeCAD.closeDocument(self.document.Name)
# ********************************************************************************************
def test_00print(
self
):
# since method name starts with 00 this will be run first
# this test just prints a line with stars
fcc_print("\n{0}\n{1} run FEM TestMeshCommon tests {2}\n{0}".format(
100 * "*",
10 * "*",
60 * "*"
))
# ********************************************************************************************
def test_mesh_seg2_python(
self
):
seg2 = Fem.FemMesh()
seg2.addNode(0, 0, 0, 1)
seg2.addNode(2, 0, 0, 2)
seg2.addNode(4, 0, 0, 3)
seg2.addEdge([1, 2])
seg2.addEdge([2, 3], 2)
node_data = [
seg2.NodeCount,
seg2.Nodes
]
edge_data = [
seg2.EdgeCount,
seg2.Edges[0],
seg2.getElementNodes(seg2.Edges[0]),
seg2.Edges[1],
seg2.getElementNodes(seg2.Edges[1])
]
expected_nodes = [
3,
{
1: FreeCAD.Vector(0.0, 0.0, 0.0),
2: FreeCAD.Vector(2.0, 0.0, 0.0),
3: FreeCAD.Vector(4.0, 0.0, 0.0)
}
]
expected_edges = [2, 1, (1, 2), 2, (2, 3)]
self.assertEqual(
node_data,
expected_nodes,
"Nodes of Python created seg2 element are unexpected"
)
self.assertEqual(
edge_data, expected_edges,
"Edges of Python created seg2 element are unexpected"
)
# ********************************************************************************************
def test_mesh_seg3_python(
self
):
seg3 = Fem.FemMesh()
seg3.addNode(0, 0, 0, 1)
seg3.addNode(1, 0, 0, 2)
seg3.addNode(2, 0, 0, 3)
seg3.addNode(3, 0, 0, 4)
seg3.addNode(4, 0, 0, 5)
seg3.addEdge([1, 3, 2])
seg3.addEdge([3, 5, 4], 2)
node_data = [seg3.NodeCount, seg3.Nodes]
edge_data = [
seg3.EdgeCount,
seg3.Edges[0],
seg3.getElementNodes(seg3.Edges[0]),
seg3.Edges[1],
seg3.getElementNodes(seg3.Edges[1])
]
expected_nodes = [
5, {
1: FreeCAD.Vector(0.0, 0.0, 0.0),
2: FreeCAD.Vector(1.0, 0.0, 0.0),
3: FreeCAD.Vector(2.0, 0.0, 0.0),
4: FreeCAD.Vector(3.0, 0.0, 0.0),
5: FreeCAD.Vector(4.0, 0.0, 0.0)
}
]
expected_edges = [2, 1, (1, 3, 2), 2, (3, 5, 4)]
self.assertEqual(
node_data,
expected_nodes,
"Nodes of Python created seg3 element are unexpected"
)
self.assertEqual(
edge_data,
expected_edges,
"Edges of Python created seg3 element are unexpected"
)
# ********************************************************************************************
def test_unv_save_load(
self
):
tetra10 = Fem.FemMesh()
tetra10.addNode(6, 12, 18, 1)
tetra10.addNode(0, 0, 18, 2)
tetra10.addNode(12, 0, 18, 3)
tetra10.addNode(6, 6, 0, 4)
tetra10.addNode(3, 6, 18, 5)
tetra10.addNode(6, 0, 18, 6)
tetra10.addNode(9, 6, 18, 7)
tetra10.addNode(6, 9, 9, 8)
tetra10.addNode(3, 3, 9, 9)
tetra10.addNode(9, 3, 9, 10)
tetra10.addVolume([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
unv_file = join(testtools.get_fem_test_tmp_dir("mesh_common_unv_save"), "tetra10_mesh.unv")
tetra10.write(unv_file)
newmesh = Fem.read(unv_file)
expected = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
self.assertEqual(
newmesh.getElementNodes(1),
expected,
"Nodes order of quadratic volume element is unexpected"
)
# ********************************************************************************************
def test_writeAbaqus_precision(
self
):
# https://forum.freecadweb.org/viewtopic.php?f=18&t=22759#p176669
# ccx reads only F20.0 (i. e. Fortran floating point field 20 chars wide)
# thus precision is set to 13 in writeAbaqus
seg2 = Fem.FemMesh()
seg2.addNode(0, 0, 0, 1)
seg2.addNode(
# 3456789012345678901234567
-5000000000000000000.1,
-1.123456789123456e-14,
-0.1234567890123456789e-101,
2
)
seg2.addEdge([1, 2])
inp_file = join(testtools.get_fem_test_tmp_dir("mesh_common_inp_preci"), "seg2_mesh.inp")
seg2.writeABAQUS(inp_file, 1, False)
read_file = open(inp_file, "r")
read_node_line = "line was not found"
for ln in read_file:
ln = ln.strip()
if ln.startswith("2, -5"):
read_node_line = ln
read_file.close()
# 1234567 12345678901234567890 12345678901234567890
expected_win = "2, -5e+018, -1.123456789123e-014, -1.234567890123e-102"
expected_lin = "2, -5e+18, -1.123456789123e-14, -1.234567890123e-102"
expected = [expected_lin, expected_win]
        self.assertIn(
            read_node_line,
            expected,
            "Problem in test_writeAbaqus_precision, \n{0}\n{1}".format(
                read_node_line,
                expected
            )
        )
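    # Worked illustration of the F20.0 limit described above (a sketch, not
    # part of the original test): with 13 significant digits the sign,
    # mantissa and exponent still fit inside ccx's 20-character field, e.g.
    #     "{0:.12e}".format(-1.123456789123456e-14)
    # yields '-1.123456789123e-14' (19 characters), matching the second
    # coordinate in expected_lin above.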
# ************************************************************************************************
# ************************************************************************************************
class TestMeshEleTetra10(unittest.TestCase):
fcc_print("import TestMeshEleTetra10")
# ********************************************************************************************
def setUp(
self
):
# setUp is executed before every test
# new document
self.document = FreeCAD.newDocument(self.__class__.__name__)
# more inits
self.elem = "tetra10"
self.base_testfile = join(
testtools.get_fem_test_home_dir(),
"mesh",
(self.elem + "_mesh.")
)
# 10 node tetrahedron --> tetra10
femmesh = Fem.FemMesh()
femmesh.addNode(6, 12, 18, 1)
femmesh.addNode(0, 0, 18, 2)
femmesh.addNode(12, 0, 18, 3)
femmesh.addNode(6, 6, 0, 4)
femmesh.addNode(3, 6, 18, 5)
femmesh.addNode(6, 0, 18, 6)
femmesh.addNode(9, 6, 18, 7)
femmesh.addNode(6, 9, 9, 8)
femmesh.addNode(3, 3, 9, 9)
femmesh.addNode(9, 3, 9, 10)
femmesh.addVolume([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
self.femmesh = femmesh
self.expected_nodes = {
"count": 10,
"nodes": {
1: FreeCAD.Vector(6.0, 12.0, 18.0),
2: FreeCAD.Vector(0.0, 0.0, 18.0),
3: FreeCAD.Vector(12.0, 0.0, 18.0),
4: FreeCAD.Vector(6.0, 6.0, 0.0),
5: FreeCAD.Vector(3.0, 6.0, 18.0),
6: FreeCAD.Vector(6.0, 0.0, 18.0),
7: FreeCAD.Vector(9.0, 6.0, 18.0),
8: FreeCAD.Vector(6.0, 9.0, 9.0),
9: FreeCAD.Vector(3.0, 3.0, 9.0),
10: FreeCAD.Vector(9.0, 3.0, 9.0),
}
}
self.expected_elem = {
"volcount": 1,
"tetcount": 1,
"volumes": [1, (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)]
}
"""
fcc_print("\n")
fcc_print(expected_nodes)
fcc_print(expected_elem)
fcc_print("\n")
"""
# ********************************************************************************************
def tearDown(
self
):
# tearDown is executed after every test
FreeCAD.closeDocument(self.document.Name)
# ********************************************************************************************
def test_00print(
self
):
# since method name starts with 00 this will be run first
# this test just prints a line with stars
fcc_print("\n{0}\n{1} run FEM TestMeshEleTetra10 tests {2}\n{0}".format(
100 * "*",
10 * "*",
56 * "*"
))
# ********************************************************************************************
def get_file_paths(
self,
file_extension
):
testfile = self.base_testfile + file_extension
outfile = join(
testtools.get_fem_test_tmp_dir("mesh_elements_" + self.elem + "_" + file_extension),
self.elem + "_mesh." + file_extension
)
# fcc_print("\n")
# fcc_print(outfile)
# fcc_print(testfile)
return (outfile, testfile)
# ********************************************************************************************
def compare_mesh_files(
self,
femmesh_testfile,
femmesh_outfile,
filetyp
):
# """
fcc_print([
femmesh_testfile.Volumes[0],
femmesh_testfile.getElementNodes(femmesh_outfile.Volumes[0])
])
# """
# test reading the test mesh
self.assertEqual(
femmesh_testfile.Nodes,
self.expected_nodes["nodes"],
"Test reading {} mesh to {} file failed. Nodes are different.\n".format(
self.elem,
filetyp
)
)
self.assertEqual(
[
femmesh_testfile.Volumes[0],
                femmesh_testfile.getElementNodes(femmesh_testfile.Volumes[0])
],
self.expected_elem["volumes"],
"Test reading {} mesh to {} file failed. Volumes are different.\n".format(
self.elem,
filetyp
)
)
# test reading the written mesh
self.assertEqual(
femmesh_outfile.Nodes,
self.expected_nodes["nodes"],
"Test reading {} mesh to {} file failed. Nodes are different.\n".format(
self.elem,
filetyp
)
)
self.assertEqual(
[
femmesh_outfile.Volumes[0],
femmesh_outfile.getElementNodes(femmesh_outfile.Volumes[0])
],
self.expected_elem["volumes"],
"Test reading {} mesh to {} file failed. Volumes are different.\n".format(
self.elem,
filetyp
)
)
# test if both are equal
self.assertEqual(
femmesh_outfile.Nodes,
femmesh_testfile.Nodes,
"Test reading {} mesh to {} file failed. Nodes are different.\n".format(
self.elem,
filetyp
)
)
self.assertEqual(
femmesh_outfile.Volumes,
femmesh_testfile.Volumes,
"Test reading {} mesh to {} file failed. Volumes are different.\n".format(
self.elem,
filetyp
)
)
# ********************************************************************************************
def test_tetra10_create(
self
):
# tetra10 element: creating by Python
node_data = {
"count": self.femmesh.NodeCount,
"nodes": self.femmesh.Nodes
}
elem_data = {
"volcount": self.femmesh.VolumeCount,
"tetcount": self.femmesh.TetraCount,
"volumes": [
self.femmesh.Volumes[0],
self.femmesh.getElementNodes(self.femmesh.Volumes[0])
]
}
self.assertEqual(
node_data,
self.expected_nodes,
"Nodes of Python created " + self.elem + "mesh element are unexpected"
)
self.assertEqual(
elem_data,
self.expected_elem,
"Elements of Python created " + self.elem + "mesh element are unexpected"
)
"""
obj = doc.addObject("Fem::FemMeshObject" , elem)
obj.FemMesh = femmesh
obj.Placement.Base = (30,50,0)
obj.ViewObject.DisplayMode = "Faces, Wireframe & Nodes"
"""
# ********************************************************************************************
def test_tetra10_inp(
self
):
# tetra10 element: reading from and writing to inp mesh file format
file_extension = "inp"
outfile, testfile = self.get_file_paths(file_extension)
self.femmesh.writeABAQUS(outfile, 1, False) # write the mesh
femmesh_outfile = Fem.read(outfile) # read the mesh from written mesh
femmesh_testfile = Fem.read(testfile) # read the mesh from test mesh
self.compare_mesh_files(
femmesh_testfile,
femmesh_outfile,
file_extension
)
# ********************************************************************************************
def test_tetra10_unv(
self
):
# tetra10 element: reading from and writing to unv mesh file format
file_extension = "unv"
outfile, testfile = self.get_file_paths(file_extension)
self.femmesh.write(outfile) # write the mesh
femmesh_outfile = Fem.read(outfile) # read the mesh from written mesh
femmesh_testfile = Fem.read(testfile) # read the mesh from test mesh
self.compare_mesh_files(
femmesh_testfile,
femmesh_outfile,
file_extension
)
# ********************************************************************************************
    def test_tetra10_vtk(
        self
    ):
        # tetra10 element: reading from and writing to vtk mesh file format
file_extension = "vtk"
outfile, testfile = self.get_file_paths(file_extension)
if "BUILD_FEM_VTK" in FreeCAD.__cmake__:
self.femmesh.write(outfile) # write the mesh
femmesh_outfile = Fem.read(outfile) # read the mesh from written mesh
femmesh_testfile = Fem.read(testfile) # read the mesh from test mesh
self.compare_mesh_files(
femmesh_testfile,
femmesh_outfile,
file_extension
)
else:
fcc_print("FEM_VTK post processing is disabled.")
# ********************************************************************************************
def test_tetra10_yml(
self
):
# tetra10 element: reading from and writing to yaml/json mesh file format
file_extension = "yml"
outfile, testfile = self.get_file_paths(file_extension)
# TODO: implement yaml/json mesh reader writer method calls in C++
# self.femmesh.write(outfile) # write the mesh
        # femmesh_outfile = Fem.read(outfile)  # read the mesh from written mesh
        # femmesh_testfile = Fem.read(testfile)  # read the mesh from test mesh
# directly use Python methods to read and write files
from feminout.importYamlJsonMesh import write
write(outfile, self.femmesh)
from feminout.importYamlJsonMesh import read
        femmesh_outfile = read(outfile)
        femmesh_testfile = read(testfile)
self.compare_mesh_files(
femmesh_testfile,
femmesh_outfile,
file_extension
)
# ********************************************************************************************
def test_tetra10_z88(
self
):
# tetra10 element: reading from and writing to z88 mesh file format
file_extension = "z88"
outfile, testfile = self.get_file_paths(file_extension)
self.femmesh.write(outfile) # write the mesh
        femmesh_outfile = Fem.read(outfile)  # read the mesh from written mesh
        femmesh_testfile = Fem.read(testfile)  # read the mesh from test mesh
self.compare_mesh_files(
femmesh_testfile,
femmesh_outfile,
file_extension
)
# ************************************************************************************************
# ************************************************************************************************
# TODO: add elements to group with another type. Should be empty at the end.
class TestMeshGroups(unittest.TestCase):
fcc_print("import TestMeshGroups")
# ********************************************************************************************
def setUp(
self
):
# setUp is executed before every test
# new document
self.document = FreeCAD.newDocument(self.__class__.__name__)
# ********************************************************************************************
def tearDown(
self
):
# tearDown is executed after every test
FreeCAD.closeDocument(self.document.Name)
# ********************************************************************************************
def test_00print(
self
):
# since method name starts with 00 this will be run first
# this test just prints a line with stars
fcc_print("\n{0}\n{1} run FEM TestMeshGroups tests {2}\n{0}".format(
100 * "*",
10 * "*",
57 * "*"
))
# ********************************************************************************************
def test_add_groups(self):
"""
Create different groups with different names. Check whether the
ids are correct, the names are correct, and whether the GroupCount is
correct.
"""
from femexamples.meshes.mesh_canticcx_tetra10 import create_elements
from femexamples.meshes.mesh_canticcx_tetra10 import create_nodes
fm = Fem.FemMesh()
control = create_nodes(fm)
if not control:
fcc_print("failed to create nodes")
control = create_elements(fm)
if not control:
fcc_print("failed to create elements")
# information
# fcc_print(fm)
expected_dict = {}
expected_dict["ids"] = []
expected_dict["names"] = [
"MyNodeGroup",
"MyEdgeGroup",
"MyVolumeGroup",
"My0DElementGroup",
"MyBallGroup"
]
expected_dict["types"] = [
"Node",
"Edge",
"Volume",
"0DElement",
"Ball"
]
expected_dict["count"] = fm.GroupCount + 5
result_dict = {}
mygrpids = []
for (name, typ) in zip(expected_dict["names"], expected_dict["types"]):
mygrpids.append(fm.addGroup(name, typ))
expected_dict["ids"] = sorted(tuple(mygrpids))
# fcc_print("expected dict")
# fcc_print(expected_dict)
result_dict["count"] = fm.GroupCount
result_dict["ids"] = sorted(fm.Groups)
result_dict["types"] = list([fm.getGroupElementType(g)
for g in fm.Groups])
result_dict["names"] = list([fm.getGroupName(g) for g in fm.Groups])
# fcc_print("result dict")
# fcc_print(result_dict)
self.assertEqual(
expected_dict,
result_dict,
msg="expected: {0}\n\nresult: {1}\n\n differ".format(expected_dict, result_dict)
)
def test_delete_groups(self):
"""
Adds a number of groups to FemMesh and deletes them
afterwards. Checks whether GroupCount is OK
"""
from femexamples.meshes.mesh_canticcx_tetra10 import create_elements
from femexamples.meshes.mesh_canticcx_tetra10 import create_nodes
fm = Fem.FemMesh()
control = create_nodes(fm)
if not control:
fcc_print("failed to create nodes")
control = create_elements(fm)
if not control:
fcc_print("failed to create elements")
# information
# fcc_print(fm)
old_group_count = fm.GroupCount
myids = []
for i in range(1000):
myids.append(fm.addGroup("group" + str(i), "Node"))
for grpid in myids:
fm.removeGroup(grpid)
new_group_count = fm.GroupCount
self.assertEqual(
old_group_count,
new_group_count,
msg=(
"GroupCount before and after adding and deleting groups differ: {0} != {1}"
.format(old_group_count, new_group_count)
)
)
def test_add_group_elements(self):
"""
Add a node group, add elements to it. Verify that elements added
and elements in getGroupElements are the same.
"""
from femexamples.meshes.mesh_canticcx_tetra10 import create_elements
from femexamples.meshes.mesh_canticcx_tetra10 import create_nodes
fm = Fem.FemMesh()
control = create_nodes(fm)
if not control:
fcc_print("failed to create nodes")
control = create_elements(fm)
if not control:
fcc_print("failed to create elements")
# information
# fcc_print(fm)
elements_to_be_added = [1, 2, 3, 4, 49, 64, 88, 100, 102, 188, 189, 190, 191]
myid = fm.addGroup("mynodegroup", "Node")
# fcc_print(fm.getGroupElements(myid))
fm.addGroupElements(myid, elements_to_be_added)
elements_returned = list(fm.getGroupElements(myid)) # returns tuple
# fcc_print(elements_returned)
self.assertEqual(
elements_to_be_added,
elements_returned,
msg=(
"elements to be added {0} and elements returned {1} differ".
format(elements_to_be_added, elements_returned)
)
)
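# --------------------------------------------------------------------------
# Minimal group-API sketch distilled from the tests above (an illustration,
# not part of the original module; assumes it runs inside FreeCAD, where the
# Fem module is importable):
#
#     fm = Fem.FemMesh()
#     gid = fm.addGroup("mynodegroup", "Node")  # create a node group
#     fm.addGroupElements(gid, [1, 2, 3])       # attach element ids
#     fm.getGroupElements(gid)                  # -> (1, 2, 3)
#     fm.removeGroup(gid)                       # GroupCount drops again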
|
sanguinariojoe/FreeCAD
|
src/Mod/Fem/femtest/app/test_mesh.py
|
Python
|
lgpl-2.1
| 24,566
|
[
"VTK"
] |
0ce8f8f5a8e659bb336e9468965a2e2a085d82e24162ddd0d2d4344627f72b15
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
from tempfile import mkdtemp
from shutil import rmtree
import locale
import logging
import subprocess
from pelican import Pelican
from pelican.settings import read_settings
from pelican.tests.support import LoggedTestCase, mute
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_PATH = os.path.abspath(os.path.join(
CURRENT_DIR, os.pardir, os.pardir, 'samples'))
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, 'output'))
INPUT_PATH = os.path.join(SAMPLES_PATH, "content")
SAMPLE_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf.py")
def recursiveDiff(dcmp):
diff = {
'diff_files': [os.path.join(dcmp.right, f)
for f in dcmp.diff_files],
'left_only': [os.path.join(dcmp.right, f)
for f in dcmp.left_only],
'right_only': [os.path.join(dcmp.right, f)
for f in dcmp.right_only],
}
for sub_dcmp in dcmp.subdirs.values():
for k, v in recursiveDiff(sub_dcmp).items():
diff[k] += v
return diff
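# Usage sketch (hypothetical paths, not part of the original module):
# recursiveDiff expects a filecmp.dircmp instance and flattens the per-
# subdirectory results into a single dict of file lists.
#
#     from filecmp import dircmp
#     diff = recursiveDiff(dircmp('expected_output', 'generated_output'))
#     assert diff['diff_files'] == []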
class TestPelican(LoggedTestCase):
    # General functional testing for pelican. Basically, this test case tries
    # to run pelican in different situations and sees how it behaves.
def setUp(self):
super(TestPelican, self).setUp()
self.temp_path = mkdtemp(prefix='pelicantests.')
self.old_locale = locale.setlocale(locale.LC_ALL)
self.maxDiff = None
locale.setlocale(locale.LC_ALL, str('C'))
def tearDown(self):
rmtree(self.temp_path)
locale.setlocale(locale.LC_ALL, self.old_locale)
super(TestPelican, self).tearDown()
def assertFilesEqual(self, diff):
msg = ("some generated files differ from the expected functional "
"tests output.\n"
"This is probably because the HTML generated files "
"changed. If these changes are normal, please refer "
"to docs/contribute.rst to update the expected "
"output of the functional tests.")
self.assertEqual(diff['left_only'], [], msg=msg)
self.assertEqual(diff['right_only'], [], msg=msg)
self.assertEqual(diff['diff_files'], [], msg=msg)
def assertDirsEqual(self, left_path, right_path):
out, err = subprocess.Popen(
['git', 'diff', '--no-ext-diff', '--exit-code', '-w', left_path, right_path], env={'PAGER': ''},
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
assert not out, out
assert not err, err
def test_basic_generation_works(self):
# when running pelican without settings, it should pick up the default
# ones and generate correct output without raising any exception
settings = read_settings(path=None, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'LOCALE': locale.normalize('en_US'),
})
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'basic'))
self.assertLogCountEqual(
count=4,
msg="Unable to find.*skipping url replacement",
level=logging.WARNING)
def test_custom_generation_works(self):
# the same thing with a specified set of settings should work
settings = read_settings(path=SAMPLE_CONFIG, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'LOCALE': locale.normalize('en_US'),
})
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom'))
def test_theme_static_paths_copy(self):
# the same thing with a specified set of settings should work
settings = read_settings(path=SAMPLE_CONFIG, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'very'),
os.path.join(SAMPLES_PATH, 'kinda'),
os.path.join(SAMPLES_PATH, 'theme_standard')]
})
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
theme_output = os.path.join(self.temp_path, 'theme')
extra_path = os.path.join(theme_output, 'exciting', 'new', 'files')
for file in ['a_stylesheet', 'a_template']:
self.assertTrue(os.path.exists(os.path.join(theme_output, file)))
for file in ['wow!', 'boom!', 'bap!', 'zap!']:
self.assertTrue(os.path.exists(os.path.join(extra_path, file)))
def test_theme_static_paths_copy_single_file(self):
# the same thing with a specified set of settings should work
settings = read_settings(path=SAMPLE_CONFIG, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'theme_standard')]
})
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
theme_output = os.path.join(self.temp_path, 'theme')
for file in ['a_stylesheet', 'a_template']:
self.assertTrue(os.path.exists(os.path.join(theme_output, file)))
|
tonyseek/pelican
|
pelican/tests/test_pelican.py
|
Python
|
agpl-3.0
| 5,426
|
[
"exciting"
] |
597bf85cc39936a9b453d2712629085647a6b08ada4eb8b502485e8cee037d1b
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
import copy
import os.path as op
from math import ceil
import warnings
import numpy as np
from scipy import linalg, sparse
from scipy.sparse import coo_matrix, block_diag as sparse_block_diag
from .utils import deprecated
from .filter import resample
from .fixes import einsum
from .evoked import _get_peak
from .parallel import parallel_func
from .surface import (read_surface, _get_ico_surface, read_morph_map,
_compute_nearest, mesh_edges)
from .source_space import (_ensure_src, _get_morph_src_reordering,
_ensure_src_subject, SourceSpaces)
from .utils import (get_subjects_dir, _check_subject, logger, verbose,
_time_mask, warn as warn_, copy_function_doc_to_method_doc)
from .viz import plot_source_estimates, plot_vector_source_estimates
from .io.base import ToDataFrameMixin, TimeMixin
from .externals.six import string_types
from .externals.six.moves import zip
from .externals.h5io import read_hdf5, write_hdf5
def _read_stc(filename):
"""Aux Function."""
with open(filename, 'rb') as fid:
buf = fid.read()
stc = dict()
offset = 0
num_bytes = 4
# read tmin in ms
stc['tmin'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tmin'] /= 1000.0
offset += num_bytes
# read sampling rate in ms
stc['tstep'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tstep'] /= 1000.0
offset += num_bytes
# read number of vertices/sources
vertices_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
# read the source vector
stc['vertices'] = np.frombuffer(buf, dtype=">u4", count=vertices_n,
offset=offset)
offset += num_bytes * vertices_n
# read the number of timepts
data_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
if (vertices_n and # vertices_n can be 0 (empty stc)
((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.frombuffer(buf, dtype=">f4", count=vertices_n * data_n,
offset=offset)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file.
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tostring())
# write sampling rate in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tostring())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tostring())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tostring())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tostring())
# close the file
fid.close()
def _read_3(fid):
"""Read 3 byte integer from file."""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
"""Read a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : string
The name of the w file.
Returns
-------
data: dict
The w structure. It has the following keys:
vertices vertex indices (0 based)
data The data matrix (nvert long)
"""
with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug
# skip first 2 bytes
fid.read(2)
# read number of vertices/sources (3 byte integer)
vertices_n = int(_read_3(fid))
vertices = np.zeros((vertices_n), dtype=np.int32)
data = np.zeros((vertices_n), dtype=np.float32)
# read the vertices and data
for i in range(vertices_n):
vertices[i] = _read_3(fid)
data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
w = dict()
w['vertices'] = vertices
w['data'] = data
return w
def _write_3(fid, val):
"""Write 3 byte integer to file."""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tostring())
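# Round-trip sketch for the 3-byte helpers above (an illustration, not part
# of the original module; a real temporary file is used because np.fromfile
# needs an actual file descriptor):
#
#     import tempfile
#     with tempfile.TemporaryFile() as fid:
#         _write_3(fid, 66051)      # 0x010203 -> bytes 0x01 0x02 0x03
#         fid.seek(0)
#         assert int(_read_3(fid)) == 66051   # (1 << 16) + (2 << 8) + 3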
def _write_w(filename, vertices, data):
"""Write a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename: string
The name of the w file.
vertices: array of int
Vertex indices (0 based).
data: 1D array
The data array (nvert).
"""
assert(len(vertices) == len(data))
fid = open(filename, 'wb')
# write 2 zero bytes
fid.write(np.zeros((2), dtype=np.uint8).tostring())
# write number of vertices/sources (3 byte integer)
vertices_n = len(vertices)
_write_3(fid, vertices_n)
# write the vertices and data
for i in range(vertices_n):
_write_3(fid, vertices[i])
# XXX: without float() endianness is wrong, not sure why
fid.write(np.array(float(data[i]), dtype='>f4').tostring())
# close the file
fid.close()
def read_source_estimate(fname, subject=None):
"""Read a source estimate object.
Parameters
----------
fname : str
Path to (a) source-estimate file(s).
subject : str | None
Name of the subject the source estimate(s) is (are) from.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate
The source estimate object loaded from file.
Notes
-----
- for volume source estimates, ``fname`` should provide the path to a
      single file named '*-vl.stc' or '*-vol.stc'
- for surface source estimates, ``fname`` should either provide the
path to the file corresponding to a single hemisphere ('*-lh.stc',
'*-rh.stc') or only specify the asterisk part in these patterns. In any
case, the function expects files for both hemisphere with names
following this pattern.
- for vector surface source estimates, only HDF5 files are supported.
- for mixed source estimates, only HDF5 files are supported.
- for single time point .w files, ``fname`` should follow the same
pattern as for surface estimates, except that files are named
'*-lh.w' and '*-rh.w'.
""" # noqa: E501
fname_arg = fname
# make sure corresponding file(s) can be found
ftype = None
if op.exists(fname):
if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
fname.endswith('-vl.w') or fname.endswith('-vol.w'):
ftype = 'volume'
elif fname.endswith('.stc'):
ftype = 'surface'
if fname.endswith(('-lh.stc', '-rh.stc')):
fname = fname[:-7]
else:
err = ("Invalid .stc filename: %r; needs to end with "
"hemisphere tag ('...-lh.stc' or '...-rh.stc')"
% fname)
raise IOError(err)
elif fname.endswith('.w'):
ftype = 'w'
if fname.endswith(('-lh.w', '-rh.w')):
fname = fname[:-5]
else:
err = ("Invalid .w filename: %r; needs to end with "
"hemisphere tag ('...-lh.w' or '...-rh.w')"
% fname)
raise IOError(err)
elif fname.endswith('.h5'):
ftype = 'h5'
fname = fname[:-3]
else:
raise RuntimeError('Unknown extension for file %s' % fname_arg)
    if ftype != 'volume':
stc_exist = [op.exists(f)
for f in [fname + '-rh.stc', fname + '-lh.stc']]
w_exist = [op.exists(f)
for f in [fname + '-rh.w', fname + '-lh.w']]
        if all(stc_exist) and (ftype != 'w'):
ftype = 'surface'
elif all(w_exist):
ftype = 'w'
elif op.exists(fname + '.h5'):
ftype = 'h5'
elif op.exists(fname + '-stc.h5'):
ftype = 'h5'
fname += '-stc'
elif any(stc_exist) or any(w_exist):
raise IOError("Hemisphere missing for %r" % fname_arg)
else:
raise IOError("SourceEstimate File(s) not found for: %r"
% fname_arg)
# read the files
if ftype == 'volume': # volume source space
if fname.endswith('.stc'):
kwargs = _read_stc(fname)
elif fname.endswith('.w'):
kwargs = _read_w(fname)
kwargs['data'] = kwargs['data'][:, np.newaxis]
kwargs['tmin'] = 0.0
kwargs['tstep'] = 0.0
else:
raise IOError('Volume source estimate must end with .stc or .w')
elif ftype == 'surface': # stc file with surface source spaces
lh = _read_stc(fname + '-lh.stc')
rh = _read_stc(fname + '-rh.stc')
assert lh['tmin'] == rh['tmin']
assert lh['tstep'] == rh['tstep']
kwargs = lh.copy()
kwargs['data'] = np.r_[lh['data'], rh['data']]
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
elif ftype == 'w': # w file with surface source spaces
lh = _read_w(fname + '-lh.w')
rh = _read_w(fname + '-rh.w')
kwargs = lh.copy()
kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
# w files only have a single time point
kwargs['tmin'] = 0.0
kwargs['tstep'] = 1.0
elif ftype == 'h5':
kwargs = read_hdf5(fname + '.h5', title='mnepython')
if "src_type" in kwargs:
ftype = kwargs['src_type']
del kwargs['src_type']
if ftype != 'volume':
# Make sure the vertices are ordered
vertices = kwargs['vertices']
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
sidx = [np.argsort(verts) for verts in vertices]
vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
kwargs['vertices'] = vertices
kwargs['data'] = data
if 'subject' not in kwargs:
kwargs['subject'] = subject
if subject is not None and subject != kwargs['subject']:
raise RuntimeError('provided subject name "%s" does not match '
                           'subject name from the file "%s"'
% (subject, kwargs['subject']))
if ftype in ('volume', 'discrete'):
stc = VolSourceEstimate(**kwargs)
elif ftype == 'mixed':
stc = MixedSourceEstimate(**kwargs)
elif ftype == 'h5' and kwargs['data'].ndim == 3:
stc = VectorSourceEstimate(**kwargs)
else:
stc = SourceEstimate(**kwargs)
return stc
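# Usage sketch (hypothetical file names, not part of the original module):
# for surface estimates either hemisphere file or the common stem works, as
# long as both '-lh.stc' and '-rh.stc' files exist on disk.
#
#     stc = read_source_estimate('mydata-lh.stc', subject='sample')
#     stc = read_source_estimate('mydata', subject='sample')  # same result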
def _get_src_type(src, vertices, warn_text=None):
src_type = None
if src is None:
if warn_text is None:
warn_("src should not be None for a robust guess of stc type.")
else:
warn_(warn_text)
if isinstance(vertices, list) and len(vertices) == 2:
src_type = 'surface'
        elif isinstance(vertices, np.ndarray) or \
                (isinstance(vertices, list) and len(vertices) == 1):
src_type = 'volume'
elif isinstance(vertices, list) and len(vertices) > 2:
src_type = 'mixed'
else:
src_type = src.kind
assert src_type in ('surface', 'volume', 'mixed', 'discrete')
return src_type
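# Illustration of the guessing rule above (a sketch, not part of the
# original module): two vertex arrays imply a surface source space, a
# single array a volume, and more than two arrays a mixed space.
#
#     _get_src_type(None, [np.arange(3), np.arange(3)])  # -> 'surface'
#     _get_src_type(None, np.arange(3))                  # -> 'volume'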
def _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,
subject=None, vector=False, source_nn=None, warn_text=None):
"""Generate a surface, vector-surface, volume or mixed source estimate."""
if src_type is None:
# attempt to guess from vertices
src_type = _get_src_type(src=None, vertices=vertices,
warn_text=warn_text)
if src_type == 'surface':
# make a surface source estimate
n_vertices = len(vertices[0]) + len(vertices[1])
if vector:
if source_nn is None:
raise RuntimeError('No source vectors supplied.')
# Rotate data to absolute XYZ coordinates
data_rot = np.zeros((n_vertices, 3, data.shape[1]))
if data.shape[0] == 3 * n_vertices:
source_nn = source_nn.reshape(n_vertices, 3, 3)
data = data.reshape(n_vertices, 3, -1)
else:
raise RuntimeError('Shape of data array does not match the '
'number of vertices.')
for i, d, n in zip(range(data.shape[0]), data, source_nn):
data_rot[i] = np.dot(n.T, d)
data = data_rot
stc = VectorSourceEstimate(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject)
else:
stc = SourceEstimate(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject)
elif src_type in ('volume', 'discrete'):
if vector:
data = data.reshape((-1, 3, data.shape[-1]))
stc = VolSourceEstimate(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject)
elif src_type == 'mixed':
# make a mixed source estimate
stc = MixedSourceEstimate(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject)
else:
raise ValueError('vertices has to be either a list with one or more '
'arrays or an array')
return stc
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations."""
compat = False
if type(a) != type(b):
raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv)
for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError('Cannot combine source estimates that do not have '
'the same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, %r and %r' % (a.subject, b.subject))
class _BaseSourceEstimate(ToDataFrameMixin, TimeMixin):
"""Abstract base class for source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : array | list of two arrays
Vertex numbers corresponding to the data.
tmin : float
Time point of the first sample in data.
tstep : float
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array or list of arrays of shape (n_dipoles,)
The indices of the dipoles in the different source spaces. Can
be an array if there is only one source space (e.g., for volumes).
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
kernel, sens_data = None, None
if isinstance(data, tuple):
if len(data) != 2:
raise ValueError('If data is a tuple it has to be length 2')
kernel, sens_data = data
data = None
if kernel.shape[1] != sens_data.shape[0]:
raise ValueError('kernel and sens_data have invalid '
'dimensions')
if isinstance(vertices, list):
vertices = [np.asarray(v, int) for v in vertices]
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
raise ValueError('Vertices must be ordered in increasing '
'order.')
n_src = sum([len(v) for v in vertices])
if len(vertices) == 1:
vertices = vertices[0]
elif isinstance(vertices, np.ndarray):
n_src = len(vertices)
else:
raise ValueError('Vertices must be a list or numpy array')
# safeguard the user against doing something silly
if data is not None and data.shape[0] != n_src:
raise ValueError('Number of vertices (%i) and stc.shape[0] (%i) '
'must match' % (n_src, data.shape[0]))
self._data = data
self._tmin = tmin
self._tstep = tstep
self.vertices = vertices
self.verbose = verbose
self._kernel = kernel
self._sens_data = sens_data
self._kernel_removed = False
self._times = None
self._update_times()
self.subject = _check_subject(None, subject, False)
@property
def sfreq(self):
"""Sample rate of the data."""
return 1. / self.tstep
def _remove_kernel_sens_data_(self):
"""Remove kernel and sensor space data and compute self._data."""
if self._kernel is not None or self._sens_data is not None:
self._kernel_removed = True
self._data = np.dot(self._kernel, self._sens_data)
self._kernel = None
self._sens_data = None
def crop(self, tmin=None, tmax=None):
"""Restrict SourceEstimate to a time interval.
Parameters
----------
tmin : float | None
The first time point in seconds. If None the first present is used.
tmax : float | None
The last time point in seconds. If None the last present is used.
"""
mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq)
self.tmin = self.times[np.where(mask)[0][0]]
if self._kernel is not None and self._sens_data is not None:
self._sens_data = self._sens_data[..., mask]
else:
self.data = self.data[..., mask]
return self # return self for chaining methods
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
verbose=None):
"""Resample data.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
Note that the sample rate of the original data is inferred from tstep.
"""
# resampling in sensor instead of source space gives a somewhat
# different result, so we don't allow it
self._remove_kernel_sens_data_()
o_sfreq = 1.0 / self.tstep
self.data = resample(self.data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
# adjust indirectly affected variables
self.tstep = 1.0 / sfreq
return self
@property
def data(self):
"""Numpy array of source estimate data."""
if self._data is None:
# compute the solution the first time the data is accessed and
# remove the kernel and sensor data
self._remove_kernel_sens_data_()
return self._data
@data.setter
def data(self, value):
value = np.asarray(value)
if self._data is not None and value.ndim != self._data.ndim:
raise ValueError('Data array should have %d dimensions.' %
self._data.ndim)
# vertices can be a single number, so cast to ndarray
if isinstance(self.vertices, list):
n_verts = sum([len(v) for v in self.vertices])
elif isinstance(self.vertices, np.ndarray):
n_verts = len(self.vertices)
else:
raise ValueError('Vertices must be a list or numpy array')
if value.shape[0] != n_verts:
raise ValueError('The first dimension of the data array must '
'match the number of vertices (%d != %d)' %
(value.shape[0], n_verts))
self._data = value
self._update_times()
@property
def shape(self):
"""Shape of the data."""
if self._data is not None:
return self._data.shape
return (self._kernel.shape[0], self._sens_data.shape[1])
@property
def tmin(self):
"""The first timestamp."""
return self._tmin
@tmin.setter
def tmin(self, value):
self._tmin = float(value)
self._update_times()
@property
def tstep(self):
"""The change in time between two consecutive samples (1 / sfreq)."""
return self._tstep
@tstep.setter
def tstep(self, value):
if value <= 0:
raise ValueError('.tstep must be greater than 0.')
self._tstep = float(value)
self._update_times()
@property
def times(self):
"""A timestamp for each sample."""
return self._times
@times.setter
def times(self, value):
raise ValueError('You cannot write to the .times attribute directly. '
'This property automatically updates whenever '
'.tmin, .tstep or .data changes.')
def _update_times(self):
"""Update the times attribute after changing tmin, tmax, or tstep."""
self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))
self._times.flags.writeable = False
def __add__(self, a):
"""Add source estimates."""
stc = self.copy()
stc += a
return stc
def __iadd__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data += a.data
else:
self.data += a
return self
def mean(self):
"""Make a summary stc file with mean power between tmin and tmax.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc (method operates inplace).
"""
data = self.data
tmax = self.tmin + self.tstep * data.shape[-1]
tmin = (self.tmin + tmax) / 2.
tstep = tmax - self.tmin
mean_stc = self.__class__(self.data.mean(axis=-1, keepdims=True),
vertices=self.vertices, tmin=tmin,
tstep=tstep, subject=self.subject)
return mean_stc
def __sub__(self, a):
"""Subtract source estimates."""
stc = self.copy()
stc -= a
return stc
def __isub__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data -= a.data
else:
self.data -= a
return self
def __truediv__(self, a): # noqa: D105
return self.__div__(a)
def __div__(self, a): # noqa: D105
"""Divide source estimates."""
stc = self.copy()
stc /= a
return stc
def __itruediv__(self, a): # noqa: D105
return self.__idiv__(a)
def __idiv__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data /= a.data
else:
self.data /= a
return self
def __mul__(self, a):
"""Multiply source estimates."""
stc = self.copy()
stc *= a
return stc
def __imul__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data *= a.data
else:
self.data *= a
return self
def __pow__(self, a): # noqa: D105
stc = self.copy()
stc **= a
return stc
def __ipow__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
self.data **= a
return self
def __radd__(self, a): # noqa: D105
return self + a
def __rsub__(self, a): # noqa: D105
return self - a
def __rmul__(self, a): # noqa: D105
return self * a
def __rdiv__(self, a): # noqa: D105
return self / a
def __neg__(self): # noqa: D105
"""Negate the source estimate."""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc.data *= -1
return stc
def __pos__(self): # noqa: D105
return self
def __abs__(self):
"""Compute the absolute value of the data.
Returns
-------
stc : instance of _BaseSourceEstimate
A version of the source estimate, where the data attribute is set
to abs(self.data).
"""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc._data = abs(stc._data)
return stc
def sqrt(self):
"""Take the square root.
Returns
-------
stc : instance of SourceEstimate
A copy of the SourceEstimate with sqrt(data).
"""
return self ** (0.5)
def copy(self):
"""Return copy of source estimate instance."""
return copy.deepcopy(self)
def bin(self, width, tstart=None, tstop=None, func=np.mean):
"""Return a source estimate object with data summarized over time bins.
Time bins of ``width`` seconds. This method is intended for
visualization only. No filter is applied to the data before binning,
making the method inappropriate as a tool for downsampling data.
Parameters
----------
width : scalar
Width of the individual bins in seconds.
tstart : scalar | None
Time point where the first bin starts. The default is the first
time point of the stc.
tstop : scalar | None
Last possible time point contained in a bin (if the last bin would
be shorter than width it is dropped). The default is the last time
point of the stc.
func : callable
Function that is applied to summarize the data. Needs to accept a
numpy.array as first input and an ``axis`` keyword argument.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The binned source estimate.
"""
if tstart is None:
tstart = self.tmin
if tstop is None:
tstop = self.times[-1]
times = np.arange(tstart, tstop + self.tstep, width)
nt = len(times) - 1
data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)
for i in range(nt):
idx = (self.times >= times[i]) & (self.times < times[i + 1])
data[..., i] = func(self.data[..., idx], axis=-1)
tmin = times[0] + width / 2.
stc = self.copy()
stc._data = data
stc.tmin = tmin
stc.tstep = width
return stc
def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
"""Get data after a linear (time) transform has been applied.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first return value is the transformed data,
remaining outputs are ignored. The first dimension of the
transformed data has to be the same as the first dimension of the
input data.
idx : array | None
            Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin_idx : int | None
Index of first time point to include. If None, the index of the
first time point is used.
tmax_idx : int | None
Index of the first time point not to include. If None, time points
up to (and including) the last time point are included.
Returns
-------
data_t : ndarray
The transformed data.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
if idx is None:
# use all time courses by default
idx = slice(None, None)
if self._kernel is None and self._sens_data is None:
if self._kernel_removed:
warn_('Performance can be improved by not accessing the data '
'attribute before calling this method.')
# transform source space data directly
data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])
if isinstance(data_t, tuple):
# use only first return value
data_t = data_t[0]
else:
# apply transform in sensor space
sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
if isinstance(sens_data_t, tuple):
# use only first return value
sens_data_t = sens_data_t[0]
# apply inverse
data_shape = sens_data_t.shape
if len(data_shape) > 2:
# flatten the last dimensions
sens_data_t = sens_data_t.reshape(data_shape[0],
np.prod(data_shape[1:]))
data_t = np.dot(self._kernel[idx, :], sens_data_t)
# restore original shape if necessary
if len(data_shape) > 2:
data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
return data_t
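# Usage sketch (illustrative only; ``stc`` is assumed to exist and
# ``square`` is a made-up transform):
#
#     def square(x):
#         return x ** 2
#
#     data_sq = stc.transform_data(square)  # all vertices, all time points
#     data_sub = stc.transform_data(square, idx=np.arange(10),
#                                   tmin_idx=0, tmax_idx=50)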
def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
"""Apply linear transform.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first two dimensions of the transformed data
should be (i) vertices and (ii) time. Transforms which yield 3D
output (e.g. time-frequency transforms) are valid, so long as the
first two dimensions are vertices and time. In this case, the
copy parameter (see below) must be True and a list of
SourceEstimates, rather than a single instance of SourceEstimate,
will be returned, one for each index of the 3rd dimension of the
transformed data. In the case of transforms yielding 2D output
(e.g. filtering), the user has the option of modifying the input
inplace (copy = False) or returning a new instance of
SourceEstimate (copy = True) with the transformed data.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin : float | int | None
First time point to include (ms). If None, self.tmin is used.
tmax : float | int | None
Last time point to include (ms). If None, self.tmax is used.
copy : bool
If True, return a new instance of SourceEstimate instead of
modifying the input inplace.
Returns
-------
stcs : SourceEstimate | VectorSourceEstimate | list
The transformed stc or, in the case of transforms which yield
N-dimensional output (where N > 2), a list of stcs. For a list,
copy must be True.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)" for
the "data" parameter, as the transform is then applied in sensor
space. Inverse methods, e.g., "apply_inverse_epochs" or
"apply_lcmv_epochs", do this automatically (if possible).
"""
# min and max data indices to include
times = 1000. * self.times
t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]
if tmin is None:
tmin_idx = None
else:
tmin_idx = t_idx[0]
if tmax is None:
tmax_idx = None
else:
# +1, because upper boundary needs to include the last sample
tmax_idx = t_idx[-1] + 1
data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
# account for change in n_vertices
if idx is not None:
idx_lh = idx[idx < len(self.lh_vertno)]
idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
verts_lh = self.lh_vertno[idx_lh]
verts_rh = self.rh_vertno[idx_rh]
else:
verts_lh = self.lh_vertno
verts_rh = self.rh_vertno
verts = [verts_lh, verts_rh]
tmin_idx = 0 if tmin_idx is None else tmin_idx
tmin = self.times[tmin_idx]
if data_t.ndim > 2:
# return list of stcs if transformed data has dimensionality > 2
if copy:
stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
self.tstep, self.subject)
for a in range(data_t.shape[-1])]
else:
raise ValueError('copy must be True if transformed data has '
'more than 2 dimensions')
else:
# return new or overwritten stc
stcs = self if not copy else self.copy()
stcs.vertices = verts
stcs.data = data_t
stcs.tmin = tmin
return stcs
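# Usage sketch (illustrative only; ``stc`` is assumed to exist):
#
#     stc_abs = stc.transform(np.abs, copy=True)  # new instance
#     stc.transform(np.abs, copy=False)           # modify in place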
def _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,
restrict_vertices):
"""Find the center of mass on a surface."""
if (values == 0).all() or (values < 0).any():
raise ValueError('All values must be non-negative and at least one '
'must be non-zero, cannot compute COM')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf = read_surface(op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf))
if restrict_vertices is True:
restrict_vertices = vertices
elif restrict_vertices is False:
restrict_vertices = np.arange(surf[0].shape[0])
elif isinstance(restrict_vertices, SourceSpaces):
idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0
restrict_vertices = restrict_vertices[idx]['vertno']
else:
restrict_vertices = np.array(restrict_vertices, int)
pos = surf[0][vertices, :].T
c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
c_o_m) ** 2, axis=1)))
vertex = restrict_vertices[vertex]
return vertex
class _BaseSurfaceSourceEstimate(_BaseSourceEstimate):
"""Abstract base class for surface source estimates.
Parameters
----------
data : array
The data in source space.
vertices : list of two arrays
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of two arrays of shape (n_dipoles,)
The indices of the dipoles in the left and right source space.
data : array
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
if not (isinstance(vertices, list) and len(vertices) == 2):
raise ValueError('Vertices, if a list, must contain two '
'numpy arrays')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
def __repr__(self): # noqa: D105
if isinstance(self.vertices, list):
nv = sum([len(v) for v in self.vertices])
else:
nv = self.vertices.size
s = "%d vertices" % nv
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data shape : %s" % (self.shape,)
return "<%s | %s>" % (type(self).__name__, s)
@property
def lh_data(self):
"""Left hemisphere data."""
return self.data[:len(self.lh_vertno)]
@property
def rh_data(self):
"""Right hemisphere data."""
return self.data[len(self.lh_vertno):]
@property
def lh_vertno(self):
"""Left hemisphere vertno."""
return self.vertices[0]
@property
def rh_vertno(self):
"""Right hemisphere vertno."""
return self.vertices[1]
def _hemilabel_stc(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertices[0]
else:
stc_vertices = self.vertices[1]
# find index of the Label's vertices
idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == 'rh':
values = self.data[idx + len(self.vertices[0])]
else:
values = self.data[idx]
return vertices, values
def in_label(self, label):
"""Get a source estimate object restricted to a label.
The returned source estimate contains the time courses of
activation of all sources inside the label.
Parameters
----------
label : Label | BiHemiLabel
The label (as created for example by mne.read_label). If the label
does not match any sources in the SourceEstimate, a ValueError is
raised.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The source estimate restricted to the given label.
"""
# make sure label and stc are compatible
if label.subject is not None and self.subject is not None \
and label.subject != self.subject:
raise RuntimeError('label and stc must have same subject names, '
'currently "%s" and "%s"' % (label.subject,
self.subject))
if label.hemi == 'both':
lh_vert, lh_val = self._hemilabel_stc(label.lh)
rh_vert, rh_val = self._hemilabel_stc(label.rh)
vertices = [lh_vert, rh_vert]
values = np.vstack((lh_val, rh_val))
elif label.hemi == 'lh':
lh_vert, values = self._hemilabel_stc(label)
vertices = [lh_vert, np.array([], int)]
elif label.hemi == 'rh':
rh_vert, values = self._hemilabel_stc(label)
vertices = [np.array([], int), rh_vert]
else:
raise TypeError("Expected Label or BiHemiLabel; got %r" % label)
if sum([len(v) for v in vertices]) == 0:
raise ValueError('No vertices match the label in the stc file')
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
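# Usage sketch (illustrative only; the label file path is hypothetical):
#
#     label = mne.read_label('/path/to/lh.BA1.label')
#     stc_label = stc.in_label(label)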
def expand(self, vertices):
"""Expand SourceEstimate to include more vertices.
This will add rows to stc.data (zero-filled) and modify stc.vertices
to include all vertices in stc.vertices and the input vertices.
Parameters
----------
vertices : list of array
New vertices to add. Can also contain old values.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc (note: method operates inplace).
"""
if not isinstance(vertices, list):
raise TypeError('vertices must be a list')
if not len(self.vertices) == len(vertices):
raise ValueError('vertices must have the same length as '
'stc.vertices')
# can no longer use kernel and sensor data
self._remove_kernel_sens_data_()
inserters = list()
offsets = [0]
for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
v_new = np.setdiff1d(v_new, v_old)
inds = np.searchsorted(v_old, v_new)
# newer numpy might overwrite inds after np.insert, copy here
inserters += [inds.copy()]
offsets += [len(v_old)]
self.vertices[vi] = np.insert(v_old, inds, v_new)
inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
inds = np.concatenate(inds)
new_data = np.zeros((len(inds),) + self.data.shape[1:])
self.data = np.insert(self.data, inds, new_data, axis=0)
return self
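# Usage sketch (illustrative only; the vertex numbers are made up):
#
#     stc.expand([np.array([10, 20]), np.array([30])])  # adds zero-filled rows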
@verbose
def to_original_src(self, src_orig, subject_orig=None,
subjects_dir=None, verbose=None):
"""Get a source estimate from morphed source to the original subject.
Parameters
----------
src_orig : instance of SourceSpaces
The original source spaces that were morphed to the current
subject.
subject_orig : str | None
The original subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The transformed source estimate.
See Also
--------
morph_source_spaces
Notes
-----
.. versionadded:: 0.10.0
"""
if self.subject is None:
raise ValueError('stc.subject must be set')
src_orig = _ensure_src(src_orig, kind='surf')
subject_orig = _ensure_src_subject(src_orig, subject_orig)
data_idx, vertices = _get_morph_src_reordering(
self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
return self.__class__(self._data[data_idx], vertices,
self.tmin, self.tstep, subject_orig)
@verbose
def morph(self, subject_to, grade=5, smooth=None, subjects_dir=None,
buffer_size=64, n_jobs=1, subject_from=None, sparse=False,
verbose=None):
"""Morph a source estimate from one subject to another.
Parameters
----------
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
grade : int, list (of two arrays), or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
then values will be morphed to the set of vertices specified in
grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
.. note:: If sparse=True, grade has to be set to None.
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
buffer_size : int
Morph data in chunks of `buffer_size` time instants.
Saves memory when morphing long time intervals.
n_jobs : int
Number of jobs to run in parallel.
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR.
If None, self.subject will be used.
sparse : bool
Morph as a sparse source estimate. If True the only
parameters used are subject_to and subject_from,
and grade has to be None.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
stc_to : SourceEstimate | VectorSourceEstimate
Source estimate for the destination subject.
"""
subject_from = _check_subject(self.subject, subject_from)
if sparse:
if grade is not None:
raise RuntimeError('grade must be set to None if sparse=True.')
return _morph_sparse(self, subject_from, subject_to, subjects_dir)
else:
return morph_data(subject_from, subject_to, self, grade, smooth,
subjects_dir, buffer_size, n_jobs, verbose)
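# Usage sketch (illustrative only; assumes 'fsaverage' is available in
# SUBJECTS_DIR):
#
#     stc_fsaverage = stc.morph('fsaverage', grade=5, smooth=5)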
def morph_precomputed(self, subject_to, vertices_to, morph_mat,
subject_from=None):
"""Morph source estimate between subjects using a precomputed matrix.
Parameters
----------
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR.
vertices_to : list of array of int
The vertices on the destination subject's brain.
morph_mat : sparse matrix
The morphing matrix, usually from compute_morph_matrix.
subject_from : string | None
Name of the original subject as named in the SUBJECTS_DIR.
If None, self.subject will be used.
Returns
-------
stc_to : SourceEstimate | VectorSourceEstimate
Source estimate for the destination subject.
"""
subject_from = _check_subject(self.subject, subject_from)
return morph_data_precomputed(subject_from, subject_to, self,
vertices_to, morph_mat)
class SourceEstimate(_BaseSurfaceSourceEstimate):
"""Container for surface source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : list of two arrays
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of two arrays of shape (n_dipoles,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : string
The stem of the file name. The file names used for surface source
spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
and "-rh.w") to the stem provided, for the left and the right
hemisphere, respectively.
ftype : string
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
"""
if ftype not in ('stc', 'w', 'h5'):
raise ValueError('ftype must be "stc", "w", or "h5", not "%s"'
% ftype)
lh_data = self.data[:len(self.lh_vertno)]
rh_data = self.data[len(self.lh_vertno):]  # avoids data[-0:] when rh is empty
if ftype == 'stc':
logger.info('Writing STC to disk...')
_write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.lh_vertno, data=lh_data)
_write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.rh_vertno, data=rh_data)
elif ftype == 'w':
if self.shape[1] != 1:
raise ValueError('w files can only contain a single time '
'point')
logger.info('Writing STC to disk (w format)...')
_write_w(fname + '-lh.w', vertices=self.lh_vertno,
data=lh_data[:, 0])
_write_w(fname + '-rh.w', vertices=self.rh_vertno,
data=rh_data[:, 0])
elif ftype == 'h5':
if not fname.endswith('.h5'):
fname += '-stc.h5'
write_hdf5(fname,
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep,
subject=self.subject), title='mnepython',
overwrite=True)
logger.info('[done]')
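# Usage sketch (illustrative only; the file stem 'my_stc' is hypothetical):
#
#     stc.save('my_stc')  # writes my_stc-lh.stc and my_stc-rh.stc
#     stc2 = mne.read_source_estimate('my_stc')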
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot(self, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=None, alpha=1.0, time_viewer=False, subjects_dir=None,
figure=None, views='lat', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground="white", initial_time=None, time_unit='s',
backend='auto', spacing='oct6'):
brain = plot_source_estimates(self, subject, surface=surface,
hemi=hemi, colormap=colormap,
time_label=time_label,
smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha,
time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure,
views=views, colorbar=colorbar,
clim=clim, cortex=cortex, size=size,
background=background,
foreground=foreground,
initial_time=initial_time,
time_unit=time_unit, backend=backend,
spacing=spacing)
return brain
@verbose
def extract_label_time_course(self, labels, src, mode='mean_flip',
allow_empty=False, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Valid values for mode are:
- 'mean': Average within each label.
- 'mean_flip': Average within each label with sign flip depending
on source orientation.
- 'pca_flip': Apply an SVD to the time courses within each label
and use the scaled and sign-flipped first right-singular vector
as the label time course. The scaling is performed such that the
power of the label time course is the same as the average
per-vertex time course power within the label. The sign of the
resulting time course is adjusted by multiplying it with
"sign(dot(u, flip))" where u is the first left-singular vector,
and flip is a sign-flip vector based on the vertex normals. This
procedure ensures that the phase does not randomly change by 180
degrees from one stc to the next.
- 'max': Max value within each label.
Parameters
----------
labels : Label | BiHemiLabel | list of Label or BiHemiLabel
The labels for which to extract the time courses.
src : list
Source spaces for left and right hemisphere.
mode : str
Extraction mode, see explanation above.
allow_empty : bool
Instead of emitting an error, return all-zero time course for
labels that do not have any vertices in the source estimate.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
label_tc : array, shape=(len(labels), n_times)
Extracted time course for each label.
See Also
--------
extract_label_time_course : extract time courses for multiple STCs
"""
label_tc = extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty, verbose=verbose)
return label_tc
def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
hemi : {'lh', 'rh', None}
The hemi to be considered. If None, the entire source space is
considered.
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
vert_as_index : bool
Whether to return the vertex index instead of its ID.
Defaults to False.
time_as_index : bool
Whether to return the time index instead of the latency.
Defaults to False.
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
"""
data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]
vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,
None: np.concatenate(self.vertices)}[hemi]
vert_idx, time_idx, _ = _get_peak(data, self.times, tmin, tmax, mode)
return (vert_idx if vert_as_index else vertno[vert_idx],
time_idx if time_as_index else self.times[time_idx])
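# Usage sketch (illustrative only; ``stc`` is assumed to exist):
#
#     vertno, t_peak = stc.get_peak(hemi='lh', tmin=0.05, tmax=0.15)
#     v_idx, t_idx = stc.get_peak(vert_as_index=True, time_as_index=True)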
def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
subjects_dir=None, surf='sphere'):
"""Compute the center of mass of activity.
This function computes the spatial center of mass on the surface
as well as the temporal center of mass as in [1]_.
.. note:: All activity must occur in a single hemisphere, otherwise
an error is raised. The "mass" of each point in space for
computing the spatial center of mass is computed by summing
across time, and vice-versa for each point in time in
computing the temporal center of mass. This is useful for
quantifying spatio-temporal cluster locations, especially
when combined with :func:`mne.vertex_to_mni`.
Parameters
----------
subject : string | None
The subject the stc is defined for.
hemi : int, or None
Calculate the center of mass for the left (0) or right (1)
hemisphere. If None, one of the hemispheres must be all zeroes,
and the center of mass will be calculated for the other
hemisphere (useful for getting COM for clusters).
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from stc. Otherwise, it could
be any vertex from surf. If an array of int, the returned vertex
will come from that array. If instance of SourceSpaces (as of
0.13), the returned vertex will be from the given source space.
For most accurate estimates, do not restrict vertices.
subjects_dir : str, or None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
See Also
--------
mne.Label.center_of_mass
mne.vertex_to_mni
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by the sum of the stc across time. For a
boolean stc, then, this would be weighted purely by the duration
each vertex was active.
hemi : int
Hemisphere the vertex was taken from.
t : float
Time of the temporal center of mass (weighted by the sum across
source vertices).
References
----------
.. [1] Larson and Lee, "The cortical dynamics underlying effective
switching of auditory spatial attention", NeuroImage 2012.
"""
if not isinstance(surf, string_types):
raise TypeError('surf must be a string, got %s' % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.data < 0):
raise ValueError('Cannot compute COM with negative values')
values = np.sum(self.data, axis=1) # sum across time
vert_inds = [np.arange(len(self.vertices[0])),
np.arange(len(self.vertices[1])) + len(self.vertices[0])]
if hemi is None:
hemi = np.where(np.array([np.sum(values[vi])
for vi in vert_inds]))[0]
if not len(hemi) == 1:
raise ValueError('Could not infer hemisphere')
hemi = hemi[0]
if hemi not in [0, 1]:
raise ValueError('hemi must be 0 or 1')
vertices = self.vertices[hemi]
values = values[vert_inds[hemi]] # left or right
del vert_inds
vertex = _center_of_mass(
vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,
subject=subject, subjects_dir=subjects_dir,
restrict_vertices=restrict_vertices)
# do time center of mass by using the values across space
masses = np.sum(self.data, axis=0).astype(float)
t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
t = self.tmin + self.tstep * t_ind
return vertex, hemi, t
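# Usage sketch (illustrative only; the subject name is hypothetical and
# all activity must lie in a single hemisphere):
#
#     vertex, hemi, t = stc.center_of_mass(subject='sample',
#                                          restrict_vertices=True)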
class VolSourceEstimate(_BaseSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : array
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
Notes
-----
.. versionadded:: 0.9.0
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
if not (isinstance(vertices, np.ndarray) or
isinstance(vertices, list)):
raise ValueError('Vertices must be a numpy array or a list of '
'arrays')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : string
The stem of the file name. The stem is extended with "-vl.stc"
or "-vl.w".
ftype : string
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
"""
if ftype not in ['stc', 'w', 'h5']:
raise ValueError('ftype must be "stc", "w" or "h5", not "%s"' %
ftype)
if ftype == 'stc':
logger.info('Writing STC to disk...')
if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
fname += '-vl.stc'
_write_stc(fname, tmin=self.tmin, tstep=self.tstep,
vertices=self.vertices, data=self.data)
elif ftype == 'w':
logger.info('Writing STC to disk (w format)...')
if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
fname += '-vl.w'
_write_w(fname, vertices=self.vertices, data=self.data)
elif ftype == 'h5':
if not fname.endswith('.h5'):
fname += '-stc.h5'
write_hdf5(fname,
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep,
subject=self.subject, src_type='volume'),
title='mnepython',
overwrite=True)
logger.info('[done]')
def save_as_volume(self, fname, src, dest='mri', mri_resolution=False):
"""Save a volume source estimate in a NIfTI file.
Parameters
----------
fname : string
The name of the generated nifti file.
src : list
The list of source spaces (should all be of type volume).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
If True, the image is saved in MRI resolution.
WARNING: if you have many time points the file produced can be
huge.
Returns
-------
img : instance Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
_save_stc_as_volume(fname, self, src, dest=dest,
mri_resolution=mri_resolution)
def as_volume(self, src, dest='mri', mri_resolution=False):
"""Export volume source estimate as a nifti object.
Parameters
----------
src : list
The list of source spaces (should all be of type volume).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
If True, the image is saved in MRI resolution.
WARNING: if you have many time points the file produced can be
huge.
Returns
-------
img : instance Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
return _save_stc_as_volume(None, self, src, dest=dest,
mri_resolution=mri_resolution)
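# Usage sketch (illustrative only; ``stc_vol`` and the volume source
# space ``src`` are assumed to exist, and nibabel to be installed):
#
#     img = stc_vol.as_volume(src, mri_resolution=True)
#     import nibabel as nib
#     nib.save(img, 'stc_vol.nii.gz')  # hypothetical output name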
def __repr__(self): # noqa: D105
if isinstance(self.vertices, list):
nv = sum([len(v) for v in self.vertices])
else:
nv = self.vertices.size
s = "%d vertices" % nv
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data size : %s" % ' x '.join(map(str, self.shape))
return "<VolSourceEstimate | %s>" % s
def get_peak(self, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
vert_as_index : bool
Whether to return the vertex index instead of its ID.
Defaults to False.
time_as_index : bool
Whether to return the time index instead of the latency.
Defaults to False.
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float
The latency in seconds.
"""
vert_idx, time_idx, _ = _get_peak(self.data, self.times, tmin, tmax,
mode)
return (vert_idx if vert_as_index else self.vertices[vert_idx],
time_idx if time_as_index else self.times[time_idx])
class VectorSourceEstimate(_BaseSurfaceSourceEstimate):
"""Container for vector surface source estimates.
For each vertex, the magnitude of the current is defined in the X, Y and Z
directions.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : array | list of two arrays
Vertex numbers corresponding to the data.
tmin : float
Time point of the first sample in data.
tstep : float
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, 3, n_times).
Notes
-----
.. versionadded:: 0.15
See Also
--------
SourceEstimate : A container for surface source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
"""
@verbose
def save(self, fname, ftype='h5', verbose=None):
"""Save the full source estimate to an HDF5 file.
Parameters
----------
fname : string
The file name to write the source estimate to, should end in
'-stc.h5'.
ftype : string
File format to use. Currently, the only allowed value is "h5".
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
if ftype != 'h5':
raise ValueError('VectorSourceEstimate objects can only be '
'written as HDF5 files.')
if not fname.endswith('.h5'):
fname += '-stc.h5'
write_hdf5(fname,
dict(vertices=self.vertices, data=self.data, tmin=self.tmin,
tstep=self.tstep, subject=self.subject),
title='mnepython', overwrite=True)
def magnitude(self):
"""Compute magnitude of activity without directionality.
Returns
-------
stc : instance of SourceEstimate
The source estimate without directionality information.
"""
data_mag = np.linalg.norm(self.data, axis=1)
return SourceEstimate(data_mag, self.vertices, self.tmin, self.tstep,
self.subject, self.verbose)
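# Usage sketch (illustrative only; ``stc_vec`` is assumed to be an
# existing VectorSourceEstimate):
#
#     stc_mag = stc_vec.magnitude()  # data shape (n_dipoles, n_times)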
def normal(self, src):
"""Compute activity orthogonal to the cortex.
Parameters
----------
src : instance of SourceSpaces
The source space for which this source estimate is specified.
Returns
-------
stc : instance of SourceEstimate
The source estimate only retaining the activity orthogonal to the
cortex.
"""
normals = np.vstack([s['nn'][v] for s, v in zip(src, self.vertices)])
data_norm = einsum('ijk,ij->ik', self.data, normals)
return SourceEstimate(data_norm, self.vertices, self.tmin, self.tstep,
self.subject, self.verbose)
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',
smoothing_steps=10, transparent=None, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer=False, subjects_dir=None, figure=None, views='lat',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground='white', initial_time=None,
time_unit='s'):
return plot_vector_source_estimates(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit
)
def __abs__(self):
"""Compute the absolute value of each component.
Returns
-------
stc_abs : VectorSourceEstimate
A vector source estimate where the data attribute is set to
abs(self.data).
See Also
--------
VectorSourceEstimate.magnitude
"""
return super(VectorSourceEstimate, self).__abs__()
class MixedSourceEstimate(_BaseSourceEstimate):
"""Container for mixed surface and volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : list of arrays
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of arrays of shape (n_dipoles,)
The indices of the dipoles in each source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
Notes
-----
.. versionadded:: 0.9.0
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
if not isinstance(vertices, list) or len(vertices) < 2:
raise ValueError('Vertices must be a list of numpy arrays with '
'one array per source space.')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='time=%02.f ms',
smoothing_steps=10,
transparent=None, alpha=1.0, time_viewer=False,
config_opts=None, subjects_dir=None, figure=None,
views='lat', colorbar=True, clim='auto'):
"""Plot surface source estimates with PySurfer.
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
src : SourceSpaces
The source spaces to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
Name of colormap to use. See `plot_source_estimates`.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing.
transparent : bool | None
If True, use a linear transparency between fmin and fmid.
None will choose automatically based on colormap type.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the FreeSurfer subjects reconstructions.
It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | None
If None, the last figure will be cleaned and a new figure will
be created.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
clim : str | dict
Colorbar properties specification. See `plot_source_estimates`.
Returns
-------
brain : Brain
An instance of surfer.viz.Brain from PySurfer.
"""
# extract surface source spaces
surf = _ensure_src(src, kind='surf')
# extract surface source estimate
data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]
vertices = [s['vertno'] for s in surf]
stc = SourceEstimate(data, vertices, self.tmin, self.tstep,
self.subject, self.verbose)
return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,
colormap=colormap, time_label=time_label,
smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha,
time_viewer=time_viewer,
config_opts=config_opts,
subjects_dir=subjects_dir, figure=figure,
views=views, colorbar=colorbar, clim=clim)
@verbose
def save(self, fname, ftype='h5', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : string
The file name to write the source estimate to, which should end in
'-stc.h5'.
ftype : string
File format to use. Currently, the only allowed value is "h5".
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
"""
if ftype != 'h5':
raise ValueError('MixedSourceEstimate objects can only be '
'written as HDF5 files.')
if not fname.endswith('.h5'):
fname += '-stc.h5'
write_hdf5(fname,
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep,
subject=self.subject, src_type='mixed'),
title='mnepython',
overwrite=True)
logger.info('[done]')
###############################################################################
# Morphing
@verbose
def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
warn=True, verbose=None):
"""Morph data from one subject's source space to another.
Parameters
----------
data : array, or csr sparse matrix
An n_vertices [x 3] x n_times (or other dimension) dataset to morph.
idx_use : array of int
Vertices from the original subject's data.
e : sparse matrix
The mesh edges of the "from" subject.
smooth : int | None
Number of smoothing iterations to perform. If None, smoothing
continues until all destination vertices are filled. A hard limit
of 100 iterations is imposed.
n_vertices : int
Number of vertices.
nearest : array of int
Vertices on the destination surface to use.
maps : sparse matrix
Morph map from one subject to the other.
warn : bool
If True, warn if not all vertices were used.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
data_morphed : array, or csr sparse matrix
The morphed data (same type as input).
"""
# When operating on vector data, morph each dimension separately
if data.ndim == 3:
data_morphed = np.zeros((len(nearest), 3, data.shape[2]),
dtype=data.dtype)
for dim in range(3):
data_morphed[:, dim, :] = _morph_buffer(
data=data[:, dim, :], idx_use=idx_use, e=e, smooth=smooth,
n_vertices=n_vertices, nearest=nearest, maps=maps, warn=warn,
verbose=verbose
)
return data_morphed
n_iter = 99  # maximum number of smoothing iterations (minus one)
if smooth is not None:
if smooth <= 0:
raise ValueError('The number of smoothing operations ("smooth") '
'has to be at least 1.')
smooth -= 1
# make sure we're in CSR format
e = e.tocsr()
if sparse.issparse(data):
use_sparse = True
if not isinstance(data, sparse.csr_matrix):
data = data.tocsr()
else:
use_sparse = False
done = False
# do the smoothing
for k in range(n_iter + 1):
# get the row sum
mult = np.zeros(e.shape[1])
mult[idx_use] = 1
idx_use_data = idx_use
data_sum = e * mult
# new indices are non-zero sums
idx_use = np.where(data_sum)[0]
# typically want to make the next iteration have these indices
idx_out = idx_use
# figure out if this is the last iteration
if smooth is None:
if k == n_iter or len(idx_use) >= n_vertices:
# stop when vertices filled
idx_out = None
done = True
elif k == smooth:
idx_out = None
done = True
# do standard smoothing multiplication
data = _morph_mult(data, e, use_sparse, idx_use_data, idx_out)
if done is True:
break
# do standard normalization
if use_sparse:
data.data /= data_sum[idx_use].repeat(np.diff(data.indptr))
else:
data /= data_sum[idx_use][:, None]
# do special normalization for last iteration
if use_sparse:
data_sum[data_sum == 0] = 1
data.data /= data_sum.repeat(np.diff(data.indptr))
else:
data[idx_use, :] /= data_sum[idx_use][:, None]
if len(idx_use) != len(data_sum) and warn:
warn_('%s/%s vertices not included in smoothing, consider increasing '
'the number of steps'
% (len(data_sum) - len(idx_use), len(data_sum)))
logger.info(' %d smooth iterations done.' % (k + 1))
data_morphed = maps[nearest, :] * data
return data_morphed
def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None):
"""Help morphing.
Equivalent to "data = (e[:, idx_use_data] * data)[idx_use_out]"
but faster.
"""
if len(idx_use_data) < e.shape[1]:
if use_sparse:
data = e[:, idx_use_data] * data
else:
# constructing a new sparse matrix is faster than sub-indexing
# e[:, idx_use_data]!
col, row = np.meshgrid(np.arange(data.shape[1]), idx_use_data)
d_sparse = sparse.csr_matrix((data.ravel(),
(row.ravel(), col.ravel())),
shape=(e.shape[1], data.shape[1]))
data = e * d_sparse
data = np.asarray(data.todense())
else:
data = e * data
# trim data
if idx_use_out is not None:
data = data[idx_use_out]
return data
def _get_subject_sphere_tris(subject, subjects_dir):
spheres = [op.join(subjects_dir, subject, 'surf',
xh + '.sphere.reg') for xh in ['lh', 'rh']]
tris = [read_surface(s)[1] for s in spheres]
return tris
def _sparse_argmax_nnz_row(csr_mat):
"""Return index of the maximum non-zero index in each row."""
n_rows = csr_mat.shape[0]
idx = np.empty(n_rows, dtype=int)  # plain int; np.int is a deprecated alias
for k in range(n_rows):
row = csr_mat[k].tocoo()
idx[k] = row.col[np.argmax(row.data)]
return idx
def _morph_sparse(stc, subject_from, subject_to, subjects_dir=None):
"""Morph sparse source estimates to an other subject.
Parameters
----------
stc : SourceEstimate | VectorSourceEstimate
The sparse STC.
subject_from : str
The subject on which stc is defined.
subject_to : str
The target subject.
subjects_dir : str
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
stc_morph : SourceEstimate | VectorSourceEstimate
The morphed source estimates.
"""
maps = read_morph_map(subject_to, subject_from, subjects_dir)
stc_morph = stc.copy()
stc_morph.subject = subject_to
cnt = 0
for k, hemi in enumerate(['lh', 'rh']):
if stc.vertices[k].size > 0:
map_hemi = maps[k]
vertno_k = _sparse_argmax_nnz_row(map_hemi[stc.vertices[k]])
order = np.argsort(vertno_k)
n_active_hemi = len(vertno_k)
data_hemi = stc_morph.data[cnt:cnt + n_active_hemi]
stc_morph.data[cnt:cnt + n_active_hemi] = data_hemi[order]
stc_morph.vertices[k] = vertno_k[order]
cnt += n_active_hemi
else:
stc_morph.vertices[k] = np.array([], int)
return stc_morph
@verbose
def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
subjects_dir=None, buffer_size=64, n_jobs=1, warn=True,
verbose=None):
"""Morph a source estimate from one subject to another.
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
stc_from : SourceEstimate | VectorSourceEstimate
Source estimates for subject "from" to morph
grade : int, list (of two arrays), or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
then values will be morphed to the set of vertices specified in
grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
buffer_size : int
Morph data in chunks of `buffer_size` time instants.
Saves memory when morphing long time intervals.
n_jobs : int
Number of jobs to run in parallel
warn : bool
If True, warn if not all vertices were used.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
stc_to : SourceEstimate | VectorSourceEstimate
Source estimate for the destination subject.
"""
if not isinstance(stc_from, _BaseSurfaceSourceEstimate):
raise ValueError('Morphing is only possible with surface or vector '
'source estimates')
logger.info('Morphing data...')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
nearest = grade_to_vertices(subject_to, grade, subjects_dir, n_jobs)
tris = _get_subject_sphere_tris(subject_from, subjects_dir)
maps = read_morph_map(subject_from, subject_to, subjects_dir)
# morph the data
data = [stc_from.lh_data, stc_from.rh_data]
data_morphed = [None, None]
n_chunks = ceil(stc_from.data.shape[1] / float(buffer_size))
parallel, my_morph_buffer, _ = parallel_func(_morph_buffer, n_jobs)
for hemi in [0, 1]:
e = mesh_edges(tris[hemi])
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = stc_from.vertices[hemi]
if len(idx_use) == 0:
continue
data_morphed[hemi] = np.concatenate(
parallel(my_morph_buffer(data_buffer, idx_use, e, smooth,
n_vertices, nearest[hemi], maps[hemi],
warn=warn)
for data_buffer
in np.array_split(data[hemi], n_chunks, axis=1)), axis=1)
vertices = [nearest[0], nearest[1]]
if data_morphed[0] is None:
if data_morphed[1] is None:
data = np.r_[[], []]
vertices = [np.array([], int), np.array([], int)]
else:
data = data_morphed[1]
vertices = [np.array([], int), vertices[1]]
elif data_morphed[1] is None:
data = data_morphed[0]
vertices = [vertices[0], np.array([], int)]
else:
data = np.r_[data_morphed[0], data_morphed[1]]
if isinstance(stc_from, VectorSourceEstimate):
stc_to = VectorSourceEstimate(data, vertices, stc_from.tmin,
stc_from.tstep, subject=subject_to,
verbose=stc_from.verbose)
else:
stc_to = SourceEstimate(data, vertices, stc_from.tmin, stc_from.tstep,
subject=subject_to, verbose=stc_from.verbose)
logger.info('[done]')
return stc_to
@verbose
def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
smooth=None, subjects_dir=None, warn=True,
xhemi=False, verbose=None):
"""Get a matrix that morphs data from one subject to another.
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR.
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR.
vertices_from : list of arrays of int
Vertices for each hemisphere (LH, RH) for subject_from.
vertices_to : list of arrays of int
Vertices for each hemisphere (LH, RH) for subject_to.
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string
Path to SUBJECTS_DIR if it is not set in the environment.
warn : bool
If True, warn if not all vertices were used.
xhemi : bool
Morph across hemisphere. Currently only implemented for
``subject_to == subject_from``. See notes below.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
morph_matrix : sparse matrix
Matrix that morphs data from ``subject_from`` to ``subject_to``.
Notes
-----
This function can be used to morph data between hemispheres by setting
``xhemi=True``. The full cross-hemisphere morph matrix maps left to right
and right to left. A matrix for cross-mapping only one hemisphere can be
constructed by specifying the appropriate vertices, for example, to map the
right hemisphere to the left:
``vertices_from=[[], vert_rh], vertices_to=[vert_lh, []]``.
Cross-hemisphere mapping requires appropriate ``sphere.left_right``
morph-maps in the subject's directory. These morph maps are included
with the ``fsaverage_sym`` FreeSurfer subject, and can be created for other
subjects with the ``mris_left_right_register`` FreeSurfer command. The
``fsaverage_sym`` subject is included with FreeSurfer > 5.1 and can be
obtained as described `here
<http://surfer.nmr.mgh.harvard.edu/fswiki/Xhemi>`_. For statistical
comparisons between hemispheres, use of the symmetric ``fsaverage_sym``
model is recommended to minimize bias [1]_.
References
----------
.. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M.
R., Fischl B., Brysbaert M.
A Surface-based Analysis of Language Lateralization and Cortical
Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013.
"""
logger.info('Computing morph matrix...')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
tris = _get_subject_sphere_tris(subject_from, subjects_dir)
maps = read_morph_map(subject_from, subject_to, subjects_dir, xhemi)
if xhemi:
hemi_indexes = [(0, 1), (1, 0)]
else:
hemi_indexes = [(0, 0), (1, 1)]
morpher = []
for hemi_from, hemi_to in hemi_indexes:
idx_use = vertices_from[hemi_from]
if len(idx_use) == 0:
continue
e = mesh_edges(tris[hemi_from])
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
m = sparse.eye(len(idx_use), len(idx_use), format='csr')
mm = _morph_buffer(m, idx_use, e, smooth, n_vertices,
vertices_to[hemi_to], maps[hemi_from], warn=warn)
morpher.append(mm)
if len(morpher) == 0:
raise ValueError("Empty morph-matrix")
elif len(morpher) == 1:
morpher = morpher[0]
else:
morpher = sparse_block_diag(morpher, format='csr')
logger.info('[done]')
return morpher
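# Usage sketch (illustrative only; subject names are hypothetical):
# precompute the matrix once, then morph many STCs cheaply with
# morph_data_precomputed (defined below).
#
#     vertices_to = grade_to_vertices('fsaverage', grade=5)
#     mm = compute_morph_matrix('sample', 'fsaverage', stc.vertices,
#                               vertices_to, smooth=5)
#     stc_fs = morph_data_precomputed('sample', 'fsaverage', stc,
#                                     vertices_to, mm)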
@verbose
def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1,
verbose=None):
"""Convert a grade to source space vertices for a given subject.
Parameters
----------
subject : str
Name of the subject
grade : int | list
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
then values will be morphed to the set of vertices specified in
grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment
n_jobs : int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
vertices : list of arrays of int
Vertex numbers for LH and RH
"""
# add special case for fsaverage for speed
if subject == 'fsaverage' and grade == 5:
return [np.arange(10242), np.arange(10242)]
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
spheres_to = [op.join(subjects_dir, subject, 'surf',
xh + '.sphere.reg') for xh in ['lh', 'rh']]
lhs, rhs = [read_surface(s)[0] for s in spheres_to]
if grade is not None: # fill a subset of vertices
if isinstance(grade, list):
if not len(grade) == 2:
raise ValueError('grade as a list must have two elements '
'(arrays of output vertices)')
vertices = grade
else:
# find which vertices to use in "to mesh"
ico = _get_ico_tris(grade, return_surf=True)
lhs /= np.sqrt(np.sum(lhs ** 2, axis=1))[:, None]
rhs /= np.sqrt(np.sum(rhs ** 2, axis=1))[:, None]
# Compute nearest vertices in high dim mesh
parallel, my_compute_nearest, _ = \
parallel_func(_compute_nearest, n_jobs)
lhs, rhs, rr = [a.astype(np.float32)
for a in [lhs, rhs, ico['rr']]]
vertices = parallel(my_compute_nearest(xhs, rr)
for xhs in [lhs, rhs])
# Make sure the vertices are ordered
vertices = [np.sort(verts) for verts in vertices]
for verts in vertices:
if (np.diff(verts) == 0).any():
raise ValueError(
'Cannot use icosahedral grade %s with subject %s, '
'mapping %s vertices onto the high-resolution mesh '
'yields repeated vertices, use a lower grade or a '
'list of vertices from an existing source space'
% (grade, subject, len(verts)))
else: # potentially fill the surface
vertices = [np.arange(lhs.shape[0]), np.arange(rhs.shape[0])]
return vertices
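# Usage sketch (illustrative only):
#
#     vertices_to = grade_to_vertices('fsaverage', grade=5)
#     # -> [np.arange(10242), np.arange(10242)] (special-cased for speed)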
def morph_data_precomputed(subject_from, subject_to, stc_from, vertices_to,
morph_mat):
"""Morph source estimate between subjects using a precomputed matrix.
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR.
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR.
stc_from : SourceEstimate | VectorSourceEstimate
Source estimates for subject "from" to morph.
vertices_to : list of array of int
The vertices on the destination subject's brain.
morph_mat : sparse matrix
The morphing matrix, typically from compute_morph_matrix.
Returns
-------
stc_to : SourceEstimate | VectorSourceEstimate
Source estimate for the destination subject.
"""
if not sparse.issparse(morph_mat):
raise ValueError('morph_mat must be a sparse matrix')
if not isinstance(vertices_to, list) or not len(vertices_to) == 2:
raise ValueError('vertices_to must be a list of length 2')
if not sum(len(v) for v in vertices_to) == morph_mat.shape[0]:
raise ValueError('number of vertices in vertices_to must match '
'morph_mat.shape[0]')
if not stc_from.data.shape[0] == morph_mat.shape[1]:
raise ValueError('stc_from.data.shape[0] must be the same as '
                         'morph_mat.shape[1]')
if stc_from.subject is not None and stc_from.subject != subject_from:
raise ValueError('stc_from.subject and subject_from must match')
if isinstance(stc_from, VectorSourceEstimate):
# Morph the locations of the dipoles, but not their orientation
n_verts, _, n_samples = stc_from.data.shape
data = morph_mat * stc_from.data.reshape(n_verts, 3 * n_samples)
data = data.reshape(morph_mat.shape[0], 3, n_samples)
stc_to = VectorSourceEstimate(data, vertices=vertices_to,
tmin=stc_from.tmin, tstep=stc_from.tstep,
verbose=stc_from.verbose,
subject=subject_to)
else:
data = morph_mat * stc_from.data
stc_to = SourceEstimate(data, vertices=vertices_to, tmin=stc_from.tmin,
tstep=stc_from.tstep, verbose=stc_from.verbose,
subject=subject_to)
return stc_to
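# Example usage (editor's sketch): morphing with a precomputed matrix. The
# compute_morph_matrix call mirrors the docstring above; the subject names
# and the smoothing value are illustrative assumptions only.
#
#     vertices_to = grade_to_vertices('fsaverage', grade=5)
#     mm = compute_morph_matrix('sample', 'fsaverage', stc.vertices,
#                               vertices_to, smooth=10)
#     stc_avg = morph_data_precomputed('sample', 'fsaverage', stc,
#                                      vertices_to, mm)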
def _get_vol_mask(src):
"""Get the volume source space mask."""
assert len(src) == 1 # not a mixed source space
shape = src[0]['shape'][::-1]
mask = np.zeros(shape, bool)
mask.flat[src[0]['vertno']] = True
return mask
def _spatio_temporal_src_connectivity_vol(src, n_times):
from sklearn.feature_extraction import grid_to_graph
mask = _get_vol_mask(src)
edges = grid_to_graph(*mask.shape, mask=mask)
connectivity = _get_connectivity_from_edges(edges, n_times)
return connectivity
def _spatio_temporal_src_connectivity_surf(src, n_times):
if src[0]['use_tris'] is None:
# XXX It would be nice to support non oct source spaces too...
raise RuntimeError("The source space does not appear to be an ico "
"surface. Connectivity cannot be extracted from"
" non-ico source spaces.")
used_verts = [np.unique(s['use_tris']) for s in src]
offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]
tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off
for u_v, s, off in zip(used_verts, src, offs)])
connectivity = spatio_temporal_tris_connectivity(tris, n_times)
# deal with source space only using a subset of vertices
masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:
raise ValueError('Used vertices do not match connectivity shape')
if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
raise ValueError('Vertex mask does not match number of vertices')
masks = np.concatenate(masks)
missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
if missing:
warn_('%0.1f%% of original source space vertices have been'
' omitted, tri-based connectivity will have holes.\n'
'Consider using distance-based connectivity or '
'morphing data to all source space vertices.' % missing)
masks = np.tile(masks, n_times)
masks = np.where(masks)[0]
connectivity = connectivity.tocsr()
connectivity = connectivity[masks]
connectivity = connectivity[:, masks]
# return to original format
connectivity = connectivity.tocoo()
return connectivity
@verbose
def spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):
"""Compute connectivity for a source space activation over time.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
n_times : int
Number of time instants.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N + 1 to 2N are the
        vertices at time 2, etc.
"""
# XXX we should compute connectivity for each source space and then
# use scipy.sparse.block_diag to concatenate them
if src[0]['type'] == 'vol':
if dist is not None:
raise ValueError('dist must be None for a volume '
'source space. Got %s.' % dist)
connectivity = _spatio_temporal_src_connectivity_vol(src, n_times)
elif dist is not None:
# use distances computed and saved in the source space file
connectivity = spatio_temporal_dist_connectivity(src, n_times, dist)
else:
connectivity = _spatio_temporal_src_connectivity_surf(src, n_times)
return connectivity
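# Example usage (editor's sketch): building the matrix for spatio-temporal
# cluster statistics; `src` and `stc` are assumed to come from the user's
# own pipeline.
#
#     connectivity = spatio_temporal_src_connectivity(src, stc.data.shape[1])
#     connectivity.shape  # (n_vertices * n_times, n_vertices * n_times)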
@verbose
def grade_to_tris(grade, verbose=None):
"""Get tris defined for a certain grade.
Parameters
----------
grade : int
Grade of an icosahedral mesh.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
tris : list
2-element list containing Nx3 arrays of tris, suitable for use in
spatio_temporal_tris_connectivity.
"""
a = _get_ico_tris(grade, None, False)
tris = np.concatenate((a, a + (np.max(a) + 1)))
return tris
@verbose
def spatio_temporal_tris_connectivity(tris, n_times, remap_vertices=False,
verbose=None):
"""Compute connectivity from triangles and time instants.
Parameters
----------
tris : array
N x 3 array defining triangles.
n_times : int
Number of time points
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N + 1 to 2N are the
        vertices at time 2, etc.
"""
if remap_vertices:
logger.info('Reassigning vertex indices.')
tris = np.searchsorted(np.unique(tris), tris)
edges = mesh_edges(tris).tocoo()
return _get_connectivity_from_edges(edges, n_times)
@verbose
def spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):
"""Compute connectivity from distances in a source space and time instants.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained using MNE
with a call to mne_add_patch_info with the --dist option.
n_times : int
Number of time points
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N + 1 to 2N are the
        vertices at time 2, etc.
"""
if src[0]['dist'] is None:
raise RuntimeError('src must have distances included, consider using\n'
'mne_add_patch_info with --dist argument')
edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]
for s in src])
edges.data[:] = np.less_equal(edges.data, dist)
# clean it up and put it in coo format
edges = edges.tocsr()
edges.eliminate_zeros()
edges = edges.tocoo()
return _get_connectivity_from_edges(edges, n_times)
@verbose
def spatial_src_connectivity(src, dist=None, verbose=None):
"""Compute connectivity for a source space activation.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_src_connectivity(src, 1, dist)
@verbose
def spatial_tris_connectivity(tris, remap_vertices=False, verbose=None):
"""Compute connectivity from triangles.
Parameters
----------
tris : array
N x 3 array defining triangles.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_tris_connectivity(tris, 1, remap_vertices)
def spatial_dist_connectivity(src, dist, verbose=None):
"""Compute connectivity from distances in a source space.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained using MNE
with a call to mne_add_patch_info with the --dist option.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_dist_connectivity(src, 1, dist)
def spatial_inter_hemi_connectivity(src, dist, verbose=None):
"""Get vertices on each hemisphere that are close to the other hemisphere.
Parameters
----------
src : instance of SourceSpaces
The source space. Must be surface type.
dist : float
Maximal Euclidean distance (in m) between vertices in one hemisphere
compared to the other to consider neighbors.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
        Typically this should be combined (additively) with another
existing intra-hemispheric connectivity matrix, e.g. computed
using geodesic distances.
"""
from scipy.spatial.distance import cdist
src = _ensure_src(src, kind='surf')
conn = cdist(src[0]['rr'][src[0]['vertno']],
src[1]['rr'][src[1]['vertno']])
conn = sparse.csr_matrix(conn <= dist, dtype=int)
empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in conn.shape]
conn = sparse.vstack([sparse.hstack([empties[0], conn]),
sparse.hstack([conn.T, empties[1]])])
return conn
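# Example usage (editor's sketch): combining inter-hemispheric connections
# with an intra-hemispheric matrix, as the docstring above suggests. The
# 1 cm threshold is an arbitrary illustration.
#
#     intra = spatial_src_connectivity(src)
#     inter = spatial_inter_hemi_connectivity(src, dist=0.01)  # 1 cm
#     connectivity = (intra + inter).tocoo()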
@verbose
def _get_connectivity_from_edges(edges, n_times, verbose=None):
"""Given edges sparse matrix, create connectivity matrix."""
n_vertices = edges.shape[0]
logger.info("-- number of connected vertices : %d" % n_vertices)
nnz = edges.col.size
aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int)
col = (edges.col[None, :] + aux).ravel()
row = (edges.row[None, :] + aux).ravel()
if n_times > 1: # add temporal edges
o = (n_vertices * np.arange(n_times - 1)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
d = (n_vertices * np.arange(1, n_times)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
row = np.concatenate((row, o, d))
col = np.concatenate((col, d, o))
data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
dtype=np.int)
connectivity = coo_matrix((data, (row, col)),
shape=(n_times * n_vertices, ) * 2)
return connectivity
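# Worked micro-example (editor's sketch) of the wiring above: with two
# vertices connected by one undirected (two-entry) spatial edge and
# n_times=3, the spatial entries are replicated at offsets 0, 2 and 4 and
# each vertex i gains temporal edges (i, i + 2) and (i + 2, i):
#
#     edges = coo_matrix(np.array([[0, 1], [1, 0]]))
#     conn = _get_connectivity_from_edges(edges, n_times=3)
#     conn.shape  # (6, 6); 2 * 3 spatial + 2 * 2 * 2 temporal = 14 entries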
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
"""Get triangles for ico surface."""
ico = _get_ico_surface(grade)
if not return_surf:
return ico['tris']
else:
return ico
@deprecated("This function is deprecated and will be removed in version 0.18. "
"Use instead stc.as_volume or stc.save_as_volume methods.")
def save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False):
"""Save a volume source estimate in a NIfTI file.
Parameters
----------
fname : string | None
The name of the generated nifti file. If None, the image is only
returned and not saved.
stc : instance of VolSourceEstimate
The source estimate
src : list
The list of source spaces (should actually be of length 1)
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
    mri_resolution : bool
        If True the image is saved in MRI resolution.
        WARNING: if you have many time points the file produced can be
        huge.
    Returns
    -------
    img : instance of Nifti1Image
The image object.
"""
    return _save_stc_as_volume(fname, stc, src, dest=dest,
                               mri_resolution=mri_resolution)
def _save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False):
"""Save a volume source estimate in a NIfTI file.
Parameters
----------
fname : string | None
The name of the generated nifti file. If None, the image is only
returned and not saved.
stc : instance of VolSourceEstimate
The source estimate
src : list
The list of source spaces (should actually be of length 1)
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
    mri_resolution : bool
        If True the image is saved in MRI resolution.
        WARNING: if you have many time points the file produced can be
        huge.
    Returns
    -------
    img : instance of Nifti1Image
The image object.
"""
if not isinstance(stc, VolSourceEstimate):
raise ValueError('Only volume source estimates can be saved as '
'volumes')
src_type = _get_src_type(src, None)
if src_type != 'volume':
raise ValueError('You need a volume source space. Got type: %s.'
% src_type)
n_times = stc.data.shape[1]
shape = src[0]['shape']
shape3d = (shape[2], shape[1], shape[0])
shape = (n_times, shape[2], shape[1], shape[0])
vol = np.zeros(shape)
if mri_resolution:
mri_shape3d = (src[0]['mri_height'], src[0]['mri_depth'],
src[0]['mri_width'])
mri_shape = (n_times, src[0]['mri_height'], src[0]['mri_depth'],
src[0]['mri_width'])
mri_vol = np.zeros(mri_shape)
interpolator = src[0]['interpolator']
n_vertices_seen = 0
for this_src in src:
assert tuple(this_src['shape']) == tuple(src[0]['shape'])
mask3d = this_src['inuse'].reshape(shape3d).astype(np.bool)
n_vertices = np.sum(mask3d)
for k, v in enumerate(vol): # loop over time instants
stc_slice = slice(n_vertices_seen, n_vertices_seen + n_vertices)
v[mask3d] = stc.data[stc_slice, k]
n_vertices_seen += n_vertices
if mri_resolution:
for k, v in enumerate(vol):
mri_vol[k] = (interpolator * v.ravel()).reshape(mri_shape3d)
vol = mri_vol
vol = vol.T
if mri_resolution:
affine = src[0]['vox_mri_t']['trans'].copy()
else:
affine = src[0]['src_mri_t']['trans'].copy()
if dest == 'mri':
affine = np.dot(src[0]['mri_ras_t']['trans'], affine)
affine[:3] *= 1e3
try:
import nibabel as nib # lazy import to avoid dependency
except ImportError:
raise ImportError("nibabel is required to save volume images.")
header = nib.nifti1.Nifti1Header()
header.set_xyzt_units('mm', 'msec')
header['pixdim'][4] = 1e3 * stc.tstep
with warnings.catch_warnings(record=True): # nibabel<->numpy warning
img = nib.Nifti1Image(vol, affine, header=header)
if fname is not None:
nib.save(img, fname)
return img
def _get_label_flip(labels, label_vertidx, src):
"""Get sign-flip for labels."""
# do the import here to avoid circular dependency
from .label import label_sign_flip
# get the sign-flip vector for every label
label_flip = list()
for label, vertidx in zip(labels, label_vertidx):
if vertidx is not None:
flip = label_sign_flip(label, src)[:, None]
else:
flip = None
label_flip.append(flip)
return label_flip
@verbose
def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
allow_empty=False, verbose=None):
"""Generate extract_label_time_course."""
# if src is a mixed src space, the first 2 src spaces are surf type and
    # the other ones are vol type. For a mixed source space, n_labels will be
    # given by the number of ROIs of the cortical parcellation plus the number
    # of vol src spaces
if len(src) > 2:
if src[0]['type'] != 'surf' or src[1]['type'] != 'surf':
raise ValueError('The first 2 source spaces have to be surf type')
if any(np.any(s['type'] != 'vol') for s in src[2:]):
            raise ValueError('the remaining source spaces have to be of '
                             'vol type')
n_aparc = len(labels)
n_aseg = len(src[2:])
n_labels = n_aparc + n_aseg
else:
n_labels = len(labels)
# get vertices from source space, they have to be the same as in the stcs
vertno = [s['vertno'] for s in src]
nvert = [len(vn) for vn in vertno]
# do the initialization
label_vertidx = list()
for label in labels:
if label.hemi == 'both':
# handle BiHemiLabel
sub_labels = [label.lh, label.rh]
else:
sub_labels = [label]
this_vertidx = list()
for slabel in sub_labels:
if slabel.hemi == 'lh':
this_vertno = np.intersect1d(vertno[0], slabel.vertices)
vertidx = np.searchsorted(vertno[0], this_vertno)
elif slabel.hemi == 'rh':
this_vertno = np.intersect1d(vertno[1], slabel.vertices)
vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertno)
else:
raise ValueError('label %s has invalid hemi' % label.name)
this_vertidx.append(vertidx)
# convert it to an array
this_vertidx = np.concatenate(this_vertidx)
if len(this_vertidx) == 0:
msg = ('source space does not contain any vertices for label %s'
% label.name)
if not allow_empty:
raise ValueError(msg)
else:
warn_(msg + '. Assigning all-zero time series to label.')
this_vertidx = None # to later check if label is empty
label_vertidx.append(this_vertidx)
# mode-dependent initialization
if mode == 'mean':
pass # we have this here to catch invalid values for mode
elif mode == 'mean_flip':
# get the sign-flip vector for every label
label_flip = _get_label_flip(labels, label_vertidx, src[:2])
elif mode == 'pca_flip':
# get the sign-flip vector for every label
label_flip = _get_label_flip(labels, label_vertidx, src[:2])
elif mode == 'max':
pass # we calculate the maximum value later
else:
raise ValueError('%s is an invalid mode' % mode)
# loop through source estimates and extract time series
for stc in stcs:
# make sure the stc is compatible with the source space
for i in range(len(src)):
if len(stc.vertices[i]) != nvert[i]:
raise ValueError('stc not compatible with source space. '
                                 'stc has %s vertices but there are %s '
'vertices in source space'
% (len(stc.vertices[i]), nvert[i]))
if any(np.any(svn != vn) for svn, vn in zip(stc.vertices, vertno)):
raise ValueError('stc not compatible with source space')
if sum(nvert) != stc.shape[0]:
raise ValueError('stc not compatible with source space. '
'stc has %s vertices but the source space '
'has %s vertices'
% (stc.shape[0], sum(nvert)))
logger.info('Extracting time courses for %d labels (mode: %s)'
% (n_labels, mode))
# do the extraction
label_tc = np.zeros((n_labels, stc.data.shape[1]),
dtype=stc.data.dtype)
if mode == 'mean':
for i, vertidx in enumerate(label_vertidx):
if vertidx is not None:
label_tc[i] = np.mean(stc.data[vertidx, :], axis=0)
elif mode == 'mean_flip':
for i, (vertidx, flip) in enumerate(zip(label_vertidx,
label_flip)):
if vertidx is not None:
label_tc[i] = np.mean(flip * stc.data[vertidx, :], axis=0)
elif mode == 'pca_flip':
for i, (vertidx, flip) in enumerate(zip(label_vertidx,
label_flip)):
if vertidx is not None:
U, s, V = linalg.svd(stc.data[vertidx, :],
full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = linalg.norm(s) / np.sqrt(len(vertidx))
label_tc[i] = sign * scale * V[0]
elif mode == 'max':
for i, vertidx in enumerate(label_vertidx):
if vertidx is not None:
label_tc[i] = np.max(np.abs(stc.data[vertidx, :]), axis=0)
else:
raise ValueError('%s is an invalid mode' % mode)
# extract label time series for the vol src space
if len(src) > 2:
v1 = nvert[0] + nvert[1]
for i, nv in enumerate(nvert[2:]):
v2 = v1 + nv
v = range(v1, v2)
if nv != 0:
label_tc[n_aparc + i] = np.mean(stc.data[v, :], axis=0)
v1 = v2
# this is a generator!
yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='mean_flip',
allow_empty=False, return_generator=False,
verbose=None):
"""Extract label time course for lists of labels and source estimates.
This function will extract one time course for each label and source
estimate. The way the time courses are extracted depends on the mode
parameter.
Valid values for mode are:
- 'mean': Average within each label.
- 'mean_flip': Average within each label with sign flip depending
on source orientation.
- 'pca_flip': Apply an SVD to the time courses within each label
and use the scaled and sign-flipped first right-singular vector
as the label time course. The scaling is performed such that the
power of the label time course is the same as the average
per-vertex time course power within the label. The sign of the
resulting time course is adjusted by multiplying it with
"sign(dot(u, flip))" where u is the first left-singular vector,
        and flip is a sign-flip vector based on the vertex normals. This
procedure assures that the phase does not randomly change by 180
degrees from one stc to the next.
- 'max': Max value within each label.
Parameters
----------
stcs : SourceEstimate | list (or generator) of SourceEstimate
The source estimates from which to extract the time course.
labels : Label | BiHemiLabel | list of Label or BiHemiLabel
The labels for which to extract the time course.
src : list
Source spaces for left and right hemisphere.
mode : str
Extraction mode, see explanation above.
allow_empty : bool
Instead of emitting an error, return all-zero time courses for labels
that do not have any vertices in the source estimate.
return_generator : bool
If True, a generator instead of a list is returned.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
label_tc : array | list (or generator) of array, shape=(len(labels), n_times)
Extracted time course for each label and source estimate.
""" # noqa: E501
# convert inputs to lists
if isinstance(stcs, SourceEstimate):
stcs = [stcs]
return_several = False
return_generator = False
else:
return_several = True
if not isinstance(labels, list):
labels = [labels]
label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,
allow_empty=allow_empty)
if not return_generator:
# do the extraction and return a list
label_tc = list(label_tc)
if not return_several:
        # input was a single SourceEstimate, return single array
label_tc = label_tc[0]
return label_tc
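# Example usage (editor's sketch): the labels are assumed to come from
# mne.read_labels_from_annot in the user's own script; subject and
# parcellation names are illustrative.
#
#     labels = mne.read_labels_from_annot('sample', parc='aparc')
#     label_tc = extract_label_time_course(stc, labels, src,
#                                          mode='mean_flip')
#     label_tc.shape  # (len(labels), n_times)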
|
teonlamont/mne-python
|
mne/source_estimate.py
|
Python
|
bsd-3-clause
| 133,598
|
[
"Mayavi"
] |
9b6298928554aba2127d8975f92780e3b5ec5c34599d6a80404106e2df40d72a
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui import Object
class APopup(Object):
def popup(self, app, title, message, icon):
raise NotImplementedError()
|
bhdouglass/agui
|
agui/aextras/popup.py
|
Python
|
gpl-3.0
| 894
|
[
"Brian"
] |
91649897539235261157993347bfe3d84b694482fe7f3af141f8206d6d2f333a
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import OrderedDict
from acq4.util import Qt
from .CanvasItem import CanvasItem
import numpy as np
import scipy.ndimage as ndimage
import acq4.pyqtgraph as pg
import acq4.pyqtgraph.flowchart
import acq4.util.DataManager as DataManager
import acq4.util.debug as debug
from .itemtypes import registerItemType
class ImageCanvasItem(CanvasItem):
"""
CanvasItem displaying an image.
The image may be 2 or 3-dimensional.
Options:
image: May be a fileHandle, ndarray, or GraphicsItem.
handle: May optionally be specified in place of image
"""
_typeName = "Image"
def __init__(self, image=None, **opts):
## If no image was specified, check for a file handle..
if image is None:
image = opts.get('handle', None)
item = None
self.data = None
if isinstance(image, Qt.QGraphicsItem):
item = image
elif isinstance(image, np.ndarray):
self.data = image
elif isinstance(image, DataManager.FileHandle):
opts['handle'] = image
self.handle = image
self.data = self.handle.read()
if 'name' not in opts:
opts['name'] = self.handle.shortName()
try:
if 'transform' in self.handle.info():
tr = pg.SRTTransform3D(self.handle.info()['transform'])
tr = pg.SRTTransform(tr) ## convert to 2D
opts['pos'] = tr.getTranslation()
opts['scale'] = tr.getScale()
opts['angle'] = tr.getRotation()
else: ## check for older info formats
if 'imagePosition' in self.handle.info():
opts['scale'] = self.handle.info()['pixelSize']
opts['pos'] = self.handle.info()['imagePosition']
elif 'Downsample' in self.handle.info():
### Needed to support an older format stored by 2p imager
if 'pixelSize' in self.handle.info():
opts['scale'] = self.handle.info()['pixelSize']
if 'microscope' in self.handle.info():
m = self.handle.info()['microscope']
opts['pos'] = m['position'][0:2]
else:
info = self.data._info[-1]
opts['pos'] = info.get('imagePosition', None)
elif hasattr(self.data, '_info'):
info = self.data._info[-1]
opts['scale'] = info.get('pixelSize', None)
opts['pos'] = info.get('imagePosition', None)
else:
opts['defaultUserTransform'] = {'scale': (1e-5, 1e-5)}
opts['scalable'] = True
except:
debug.printExc('Error reading transformation for image file %s:' % image.name())
if item is None:
item = pg.ImageItem()
CanvasItem.__init__(self, item, **opts)
self.splitter = Qt.QSplitter()
self.splitter.setOrientation(Qt.Qt.Vertical)
self.layout.addWidget(self.splitter, self.layout.rowCount(), 0, 1, 2)
self.filterGroup = pg.GroupBox("Image Filter")
fgl = Qt.QGridLayout()
fgl.setContentsMargins(3, 3, 3, 3)
fgl.setSpacing(1)
self.filterGroup.setLayout(fgl)
self.filter = ImageFilterWidget()
self.filter.sigStateChanged.connect(self.filterStateChanged)
fgl.addWidget(self.filter)
self.splitter.addWidget(self.filterGroup)
self.histogram = pg.HistogramLUTWidget()
self.histogram.setImageItem(self.graphicsItem())
# addWidget arguments: row, column, rowspan, colspan
self.splitter.addWidget(self.histogram)
self.imgModeCombo = Qt.QComboBox()
self.imgModeCombo.addItems(['SourceOver', 'Overlay', 'Plus', 'Multiply'])
self.layout.addWidget(self.imgModeCombo, self.layout.rowCount(), 0, 1, 1)
self.imgModeCombo.currentIndexChanged.connect(self.imgModeChanged)
self.autoBtn = Qt.QPushButton("Auto")
self.autoBtn.setCheckable(True)
self.autoBtn.setChecked(True)
self.layout.addWidget(self.autoBtn, self.layout.rowCount()-1, 1, 1, 1)
self.timeSlider = Qt.QSlider(Qt.Qt.Horizontal)
self.layout.addWidget(self.timeSlider, self.layout.rowCount(), 0, 1, 2)
self.timeSlider.valueChanged.connect(self.timeChanged)
        ## controls that only appear if there is a time axis
self.timeControls = [self.timeSlider]
if self.data is not None:
if isinstance(self.data, pg.metaarray.MetaArray):
self.filter.setInput(self.data.asarray())
else:
self.filter.setInput(self.data)
self.updateImage()
# Needed to ensure selection box wraps the image properly
tr = self.saveTransform()
self.resetUserTransform()
self.restoreTransform(tr)
# Why doesn't this work?
#self.selectBoxFromUser() ## move select box to match new bounds
@classmethod
def checkFile(cls, fh):
if not fh.isFile():
return 0
ext = fh.ext().lower()
if ext == '.ma':
return 10
        elif ext in ['.png', '.jpg', '.tif']:
return 100
return 0
def timeChanged(self, t):
self.updateImage()
def imgModeChanged(self):
mode = str(self.imgModeCombo.currentText())
self.graphicsItem().setCompositionMode(getattr(Qt.QPainter, 'CompositionMode_' + mode))
def filterStateChanged(self):
self.updateImage()
def updateImage(self):
img = self.graphicsItem()
# Try running data through flowchart filter
data = self.filter.output()
if data is None:
data = self.data
if data.ndim == 4:
showTime = True
elif data.ndim == 3:
if data.shape[2] <= 4: ## assume last axis is color
showTime = False
else:
showTime = True
else:
showTime = False
if showTime:
self.timeSlider.setMinimum(0)
self.timeSlider.setMaximum(data.shape[0]-1)
self.graphicsItem().setImage(data[self.timeSlider.value()], autoLevels=self.autoBtn.isChecked())
else:
self.graphicsItem().setImage(data, autoLevels=self.autoBtn.isChecked())
for widget in self.timeControls:
widget.setVisible(showTime)
def saveState(self, **kwds):
state = CanvasItem.saveState(self, **kwds)
state['imagestate'] = self.histogram.saveState()
state['filter'] = self.filter.saveState()
state['composition'] = self.imgModeCombo.currentText()
return state
def restoreState(self, state):
CanvasItem.restoreState(self, state)
self.filter.restoreState(state['filter'])
self.imgModeCombo.setCurrentIndex(self.imgModeCombo.findText(state['composition']))
self.histogram.restoreState(state['imagestate'])
registerItemType(ImageCanvasItem)
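# Example usage (editor's sketch): constructing an item directly from an
# array; the canvas object below is hypothetical.
#
#     data = np.random.normal(size=(10, 128, 128))  # (time, x, y) stack
#     item = ImageCanvasItem(image=data, name='demo')
#     canvas.addItem(item)  # time slider appears because data.ndim == 3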
class ImageFilterWidget(Qt.QWidget):
sigStateChanged = Qt.Signal()
def __init__(self):
Qt.QWidget.__init__(self)
self.layout = Qt.QGridLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
# Set up filter buttons
self.btns = OrderedDict()
row, col = 0, 0
for name in ['Mean', 'Max', 'Max w/Gaussian', 'Max w/Median', 'Edge']:
btn = Qt.QPushButton(name)
self.btns[name] = btn
btn.setCheckable(True)
self.layout.addWidget(btn, row, col)
btn.clicked.connect(self.filterBtnClicked)
col += 1
if col > 1:
col = 0
row += 1
# show flowchart control panel inside a collapsible group box
self.fcGroup = pg.GroupBox('Filter Settings')
fgl = Qt.QVBoxLayout()
self.fcGroup.setLayout(fgl)
fgl.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.fcGroup, row+1, 0, 1, 2)
self.fc = pg.flowchart.Flowchart(terminals={'dataIn': {'io':'in'}, 'dataOut': {'io':'out'}})
fgl.addWidget(self.fc.widget())
self.fcGroup.setCollapsed(True)
self.fc.sigStateChanged.connect(self.sigStateChanged)
def filterBtnClicked(self, checked):
# remember slice before clearing fc
snode = self.fc.nodes().get('Slice', None)
if snode is not None:
snstate = snode.saveState()
else:
snstate = None
print(snstate)
self.fc.clear()
if not checked:
return
btn = self.sender()
# uncheck all other filter btns
for b in self.btns.values():
if b is not btn:
b.setChecked(False)
name = btn.text()
if name == 'Mean':
s = self.fc.createNode('Slice', name="Slice")
m = self.fc.createNode('Mean', name="Mean", pos=[150, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
elif name == 'Max':
s = self.fc.createNode('Slice', name="Slice")
m = self.fc.createNode('Max', name="Max", pos=[150, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
elif name == 'Max w/Gaussian':
s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
f = self.fc.createNode('GaussianFilter', name="GaussianFilter", pos=[70, 0])
m = self.fc.createNode('Max', name="Max", pos=[180, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], f['In'])
self.fc.connectTerminals(f['Out'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
elif name == 'Max w/Median':
s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
f = self.fc.createNode('MedianFilter', name="MedianFilter", pos=[70, 0])
m = self.fc.createNode('Max', name="Max", pos=[180, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], f['In'])
self.fc.connectTerminals(f['Out'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
elif name == 'Edge':
s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
f1 = self.fc.createNode('PythonEval', name='GaussDiff', pos=[70, 0])
f1.setCode("""
from scipy.ndimage import gaussian_filter
img = args['input'].astype(float)
edge = gaussian_filter(img, (0, 2, 2)) - gaussian_filter(img, (0, 1, 1))
return {'output': edge}
""")
m = self.fc.createNode('Max', name="Max", pos=[180, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], f1['input'])
self.fc.connectTerminals(f1['output'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
        # restore slice if possible
if snstate is not None:
snode = self.fc.nodes().get('Slice', None)
if snode is not None:
print("restore!")
snode.restoreState(snstate)
def setInput(self, img):
self.fc.setInput(dataIn=img)
def output(self):
return self.fc.output()['dataOut']
def process(self, img):
return self.fc.process(dataIn=img)['dataOut']
def saveState(self):
return {'flowchart': self.fc.saveState()}
def restoreState(self, state):
self.fc.restoreState(state['flowchart'])
|
pbmanis/acq4
|
acq4/util/Canvas/items/ImageCanvasItem.py
|
Python
|
mit
| 12,416
|
[
"Gaussian"
] |
08e6c41a3066ed525e836790e6caca4c656193a17334156258fcf18bda758ee4
|
#!/usr/bin/env python
'''
First script to reduce the data from the beta release of Radio Galaxy Zoo (Oct 2013)
Written by Julie Banfield, CSIRO
'''
# import necessary python packages
import numpy as np
import matplotlib # plotting package
import pylab as plt # part of plotting package
import datetime
import pandas as pd
import os, sys
from pymongo import MongoClient
from bson import ObjectId
from astropy.io import fits
from astropy import wcs
#from astropy import coordinates as coord
#from astropy.io import votable
#------------------------------------------------------------------------------------------------------------
# Define some functions
#http://radio.galaxyzoo.org.s3.amazonaws.com/beta/subjects/radio/S334.jpg
#http://radio.galaxyzoo.org.s3.amazonaws.com/beta/subjects/standard/S334.jpg
#http://radio.galaxyzoo.org.s3.amazonaws.com/beta/subjects/raw/S334.fits.gz
# (1) Determine the WCS object based on subject
def getWCSObj(subject):
src = subject["metadata"]["source"]
path = "./IMGS/%s.fits" % src
hdulist = fits.open(path)
w = wcs.WCS(hdulist[0].header)
return w
#------------------------------------------------------------------------------------------------------------
# Set constants
beta_release_date = datetime.datetime(2013, 9, 10, 12, 0, 0, 0) # date of beta release
IMG_HEIGHT = 424.0 # number of pixels in the JPG image along the y axis
IMG_WIDTH = 424.0 # number of pixels in the JPG image along the x axis
FITS_HEIGHT = 301.0 # number of pixels in the FITS image along the y axis
FITS_WIDTH = 301.0 # number of pixels in the FITS image along the x axis
PIXEL_SIZE = 0.00016667#/3600.0 # the size of a pixel in the FITS image, in degrees
xjpg2fits = float(IMG_WIDTH/FITS_WIDTH) # map the JPG pixels to the FITS pixels in x
yjpg2fits = float(IMG_HEIGHT/FITS_HEIGHT) # map the JPG pixels to the FITS pixels in y
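# Worked example (editor's sketch) of the scale factors above: a JPG x pixel
# of 212 (the image centre) maps to a FITS x pixel of
# 212 / (424.0/301.0) ~ 150.5, i.e. the centre of the 301-pixel FITS axis;
# widths, heights and (flipped) y positions are rescaled the same way below.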
# Connect to Mongo database
# Make sure to run mongorestore /path/to/database before running
client = MongoClient('localhost', 27017)
db = client['ouroboros']
subjects = db['radio_subjects'] # subjects = images
classifications = db['radio_classifications'] # classifications = classifications of each subject per user
Nsubjects = subjects.count() # determine the number of images in the data set
Nclassifications = classifications.find({"updated_at": {"$gt": beta_release_date}}).count() # total number of classifications
# query database
batch = classifications.find({"updated_at": {"$gt": beta_release_date}})
print ' '
print '%s Release Date' % beta_release_date
print '---------------------------------'
print 'Total subjects : %d' % Nsubjects
print 'Total classifications : %d' % Nclassifications
print ' '
classfile = open('RGZBETA-class.dat', 'w')
#compfile = open('RGZBETA-components.dat','w')
#print >> compfile,'{0:10} {1:7} {2:7} {3:11} {4:7} {5:11}'.format('#SRC_ID ','N_TOTAL','N_RADIO','N_RADIO_MED',' N_IR',' N_IR_MED')
#------------------------------------------------------------------------------------------------------------------------
#---START: loop through images/subjects
for item in list(subjects.find().limit(100)):
Nclass = item["classification_count"] # number of classifications made per image
if Nclass <= 0: # if no classifications move to next image
continue
srcid = item["metadata"]["source"] # determine the image source id
classfile2 = open('RGZBETA-%s-classifications.dat' % srcid, 'w')
wgetcmd = 'wget http://radio.galaxyzoo.org.s3.amazonaws.com/subjects/raw/' + srcid + '.fits.gz'
os.system(wgetcmd)
mvcmd = 'mv '+ srcid + '.fits.gz ./IMGS/'
os.system(mvcmd)
zipcmd = 'gunzip ./IMGS/%s.fits.gz' % srcid
os.system(zipcmd)
wcsObj = getWCSObj(item) # get WCS object for pixel to sky transformation
zipcmd = 'gzip ./IMGS/%s.fits' % srcid
os.system(zipcmd)
print >> classfile, srcid, Nclass
print >> classfile, '-----------'
annfile = open('%s.ann' % srcid, 'w') # kvis annotation file
print >> annfile, '#KARMA annotation file'
print >> annfile, 'COORD W'
print >> annfile, 'PA STANDARD'
print >> annfile, 'FONT hershey14'
imgid = item["_id"] # grab the ObjectId corresponding for this image
    # locate all the classifications of this image by user
user_classifications = classifications.find({"subject_ids": imgid, "updated_at": {"$gt": beta_release_date}})
# count the number of users who classified this object
Nusers = classifications.find({"subject_ids": imgid, "updated_at": {"$gt": beta_release_date}}).count()
# loop over the number of classifications
if Nclass == Nusers: # the number of classifications should equal the number
# of users who classified
# initialise coordinate variables
radio_ra = []
radio_dec = []
radio_x = []
radio_y = []
radio_w = []
radio_h = []
ir_ra = []
ir_dec = []
ir_radius = []
ir_x = []
ir_y = []
radio_comp = []
ir_comp = []
# loop over users who classified
print 'Working on source: %s' % srcid
Nuser_id = 0 # User id number
#---------------------------------------------------------------------------------------------------------------------
#---START: loop through the users who classified the image
for k in list(user_classifications):
compid = 0 # Component id per image
c = k["annotations"][0] # get the user annotations
print >> classfile, c
Nuser_id = Nuser_id + 1 # increase the number of user who classified by 1.
#-------------------------------------------------------------------------------------------------------------------
#---START: loop through the keys in the annotation array, making sure that a classification has been made
for key in c:
if key in ["started_at", "finished_at"]: # no user classifications, move to next classification
continue
markings = c[key] # get keys related to the user annotations
                Nradio = 0 # counter for the number of radio components per classification
Nir = 0 # counter for the number of IR components per classification
if markings.has_key("radio"): # get the radio annotations
radio = markings["radio"]
Nradio = len(radio) # count the number of radio components per classification
                    compid = compid + 1 # we have a radio source - all components will be identified with this number
#---------------------------------------------------------------------------------------------------------------
#---START: loop through number of radio components in user classification
for rr in radio:
radio_marking = radio[rr]
                        # NOTE: JPG images begin at (0,0) in the upper left of the image. FITS images begin at (1,1) in
                        # the lower left of the image. I have added 1.0 to the x and y values of the annotation.
                        # The pixels in the JPG image increase to the right for x and down for y.
xULC = 1.0 + float(radio_marking["x"]) # x pixel position in the JPG file for the ULC of the box
# surrounding the radio blob. Usually the 3 sigma contour.
yULC = 1.0 + float(radio_marking["y"]) # y pixel position in the JPG file for the ULC of the box
# surrounding the radio blob. Usually the 3 sigma contour.
yULC = (IMG_HEIGHT - yULC) # transfer the pixel value to increase upwards in y in JPG pixels.
width = float(radio_marking["width"]) # width of the box surrounding the radio blob in JPG pixels.
height = float(radio_marking["height"]) # height of the box surrounding the radio blob in JPG pixels.
# convert the pixels into FITS image pixels
xULC = xULC/xjpg2fits # x pixel position in the FITS image for the ULC of the box
# surrounding the radio blob.
yULC = yULC/yjpg2fits # y pixel position in the FITS image for the ULC of the box
# surrounding the radio blob.
width = width/xjpg2fits # width of the box surround the radio blob in FITS pixels.
height = height/yjpg2fits # height of the box surrounding the radio blob in FITS pixels.
# Want to create a kvis annotation file to overlay on source
# the radio will be given by the box and the IR will be shown in a circle
#
# BOX [coord type] <x1> <y1> <x2> <y2>
# CIRCLE [coord type] <x_cent> <y_cent> <radius>
coords1 = wcsObj.wcs_pix2world([[xULC,yULC]], 0)[0] # convert ULC pixels to RA and DEC (degrees)
xBRC = xULC + width # x pixel position in the FITS image for the BRC of the box
                        # surrounding the radio blob.
                        yBRC = yULC - height # y pixel position in the FITS image for the BRC of the box
                        # surrounding the radio blob.
coords2 = wcsObj.wcs_pix2world([[xBRC,yBRC]], 0)[0] # convert BRC pixels to RA and DEC (degrees)
# write to annotation file
print >> annfile, 'COLOUR BLUE'
print >> annfile, 'BOX',coords1[0],coords1[1],coords2[0],coords2[1]
print >> classfile2, Nuser_id, compid,'RADIO', xBRC, yBRC, width, height, coords1[0],coords1[1]
#---END: loop through number of radio components in user classification
#---------------------------------------------------------------------------------------------------------------
# get IR counterpart
if markings.has_key("infrared"): # get the infrared annotation for the radio classification.
ir = markings["infrared"]
Nir = 1 #len(ir) # number of IR counterparts. NOTE: for beta this is 1 but
# future Galaxy Zoo: Radio releases this will be changed.
jj = 0
for ii in ir:
ir_marking = ir[ii]
if jj == 0: yir = 1.0 + float(ir_marking) # y pixel location in the JPG image for the IR source.
if jj == 1: xir = 1.0 + float(ir_marking) # x pixel location in the JPG image for the IR source.
if jj == 2: radiusir = float(ir_marking) # radius of circle centred at xir,yir.
jj = jj+1
# convert the pixels into FITS image pixels
xir = xir/xjpg2fits # x pixel location in FITS image for IR source.
yir = (IMG_HEIGHT - yir)/xjpg2fits # y pixel location in FITS image for IR source.
radiusir = (radiusir/xjpg2fits) * PIXEL_SIZE # radius of circle around IR source in degrees in FITS image.
# NOTE: radius removed in following releases of project.
coords3 = wcsObj.wcs_pix2world([[xir,yir]], 0)[0] # convert IR pixel location to RA and DEC (degrees)
# write to annotation file
print >> annfile, 'COLOUR RED'
print >> annfile, 'CIRCLE',coords3[0],coords3[1],radiusir
print >> annfile, 'CROSS',coords3[0],coords3[1],0.0005,0.0005
print >> classfile2, Nuser_id, compid, 'IR', xir, yir, radiusir, coords3[0],coords3[1]
else: # user did not classify an infrared source
Nir = 0
xir = -99.
yir = -99.
radiusir = -99.
print >> classfile2, Nuser_id, compid, 'IR', xir, yir, radiusir, -99.0, -99.0
                else: # user did not classify a radio source
Nradio = 0
Nir = 0
# there should always be a radio source, bug in program if we reach this part.
print ''
print >> classfile2,'No radio source?...error on image %s' % srcid
#print 'exiting.....'
#exit()
radio_comp.append( Nradio ) # add the number of radio components per user source to array.
                ir_comp.append( Nir ) # add the number of IR counterparts per user source to array.
#---END: loop through the users who classified the image
#---------------------------------------------------------------------------------------------------------------------
else:
print 'number of users who classified does not equal classification count?'
print 'exiting....'
exit()
# calculate the median number of components for both IR and radio for each object in image.
#radio_med = np.median(radio_comp) # median number of radio components
#Ncomp_radio = np.size(np.where(radio_comp == radio_med)) # number of classifications = median number
#ir_med = np.median(ir_comp) # median number of infrared components
#Ncomp_ir = np.size(np.where(ir_comp == ir_med)) # number of classifications = median number
# print >> compfile,'{0:10} {1:7d} {2:7d} {3:11.1f} {4:7d} {5:11.1f}'.format(srcid,len(radio_comp),Ncomp_radio,radio_med,Ncomp_ir,ir_med)
annfile.close()
classfile2.close()
# print >> classfile2,' '
# print >> classfile2,'Source.....................................................................................: %s' % srcid
# print >> classfile2,'Number of users who classified the object..................................................: %d' % len(radio_comp)
# print >> classfile2,'Number of users who classified the radio source with the median value of radio components..: %d' % Ncomp_radio
# print >> classfile2,'Median value of radio components from the users............................................: %f' % radio_med
# print >> classfile2,'Number of users who classified the IR source with the median value of IR components........: %d' % Ncomp_ir
# print >> classfile2,'Median value of IFcomponents from the users................................................: %f' % ir_med
#---END: loop through images/subjects
#------------------------------------------------------------------------------------------------------------------------
classfile.close()
#compfile.close()
|
willettk/rgz-analysis
|
python/betatest.py
|
Python
|
mit
| 14,268
|
[
"Galaxy"
] |
5fdacf164e01b0bdf23efcf90a8e0ee3d525ad9e63207d99734124ce62c5ad7b
|
#!/usr/bin/env python
'''Generate a Python ctypes wrapper file for a header file.
Usage example::
wrap.py -lGL -oGL.py /usr/include/GL/gl.h
>>> from GL import *
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypesparser import *
import textwrap
import sys
class CtypesWrapper(CtypesParser, CtypesTypeVisitor):
file=None
def begin_output(self, output_file, library, link_modules=(),
emit_filenames=(), all_headers=False):
self.library = library
self.file = output_file
self.all_names = []
self.known_types = {}
self.structs = set()
self.enums = set()
self.emit_filenames = emit_filenames
self.all_headers = all_headers
self.linked_symbols = {}
for name in link_modules:
module = __import__(name, globals(), locals(), ['foo'])
for symbol in dir(module):
if symbol not in self.linked_symbols:
self.linked_symbols[symbol] = '%s.%s' % (name, symbol)
self.link_modules = link_modules
self.print_preamble()
self.print_link_modules_imports()
def wrap(self, filename, source=None):
assert self.file, 'Call begin_output first'
self.parse(filename, source)
def end_output(self):
self.print_epilogue()
self.file = None
def does_emit(self, symbol, filename):
return self.all_headers or filename in self.emit_filenames
def print_preamble(self):
import textwrap
import time
print >> self.file, textwrap.dedent("""
'''Wrapper for %(library)s
Generated with:
%(argv)s
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library(%(library)r)
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
""" % {
'library': self.library,
'date': time.ctime(),
'class': self.__class__.__name__,
'argv': ' '.join(sys.argv),
}).lstrip()
def print_link_modules_imports(self):
for name in self.link_modules:
print >> self.file, 'import %s' % name
print >> self.file
def print_epilogue(self):
print >> self.file
print >> self.file, '\n'.join(textwrap.wrap(
'__all__ = [%s]' % ', '.join([repr(n) for n in self.all_names]),
width=78,
break_long_words=False))
def handle_ctypes_constant(self, name, value, filename, lineno):
if self.does_emit(name, filename):
print >> self.file, '%s = %r' % (name, value),
print >> self.file, '\t# %s:%d' % (filename, lineno)
self.all_names.append(name)
def handle_ctypes_type_definition(self, name, ctype, filename, lineno):
if self.does_emit(name, filename):
self.all_names.append(name)
if name in self.linked_symbols:
print >> self.file, '%s = %s' % \
(name, self.linked_symbols[name])
else:
ctype.visit(self)
self.emit_type(ctype)
print >> self.file, '%s = %s' % (name, str(ctype)),
print >> self.file, '\t# %s:%d' % (filename, lineno)
else:
self.known_types[name] = (ctype, filename, lineno)
def emit_type(self, t):
t.visit(self)
for s in t.get_required_type_names():
if s in self.known_types:
if s in self.linked_symbols:
print >> self.file, '%s = %s' % (s, self.linked_symbols[s])
else:
s_ctype, s_filename, s_lineno = self.known_types[s]
s_ctype.visit(self)
self.emit_type(s_ctype)
print >> self.file, '%s = %s' % (s, str(s_ctype)),
print >> self.file, '\t# %s:%d' % (s_filename, s_lineno)
del self.known_types[s]
def visit_struct(self, struct):
if struct.tag in self.structs:
return
self.structs.add(struct.tag)
base = {True: 'Union', False: 'Structure'}[struct.is_union]
print >> self.file, 'class struct_%s(%s):' % (struct.tag, base)
print >> self.file, ' __slots__ = ['
if not struct.opaque:
for m in struct.members:
print >> self.file, " '%s'," % m[0]
print >> self.file, ' ]'
# Set fields after completing class, so incomplete structs can be
# referenced within struct.
for name, typ in struct.members:
self.emit_type(typ)
print >> self.file, 'struct_%s._fields_ = [' % struct.tag
if struct.opaque:
print >> self.file, " ('_opaque_struct', c_int)"
self.structs.remove(struct.tag)
else:
for m in struct.members:
print >> self.file, " ('%s', %s)," % (m[0], m[1])
print >> self.file, ']'
print >> self.file
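    # Illustration (editor's sketch) of the wrapper code visit_struct emits
    # for a hypothetical two-member struct tagged 'point' (the trailing
    # filename comments omitted):
    #
    #     class struct_point(Structure):
    #         __slots__ = [
    #             'x',
    #             'y',
    #         ]
    #     struct_point._fields_ = [
    #         ('x', c_int),
    #         ('y', c_int),
    #     ]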
def visit_enum(self, enum):
if enum.tag in self.enums:
return
self.enums.add(enum.tag)
print >> self.file, 'enum_%s = c_int' % enum.tag
for name, value in enum.enumerators:
self.all_names.append(name)
print >> self.file, '%s = %d' % (name, value)
def handle_ctypes_function(self, name, restype, argtypes, filename, lineno):
if self.does_emit(name, filename):
# Also emit any types this func requires that haven't yet been
# written.
self.emit_type(restype)
for a in argtypes:
self.emit_type(a)
self.all_names.append(name)
print >> self.file, '# %s:%d' % (filename, lineno)
print >> self.file, '%s = _lib.%s' % (name, name)
print >> self.file, '%s.restype = %s' % (name, str(restype))
print >> self.file, '%s.argtypes = [%s]' % \
(name, ', '.join([str(a) for a in argtypes]))
print >> self.file
def handle_ctypes_variable(self, name, ctype, filename, lineno):
# This doesn't work.
#self.all_names.append(name)
#print >> self.file, '%s = %s.indll(_lib, %r)' % \
# (name, str(ctype), name)
pass
def main(*argv):
import optparse
import sys
import os.path
usage = 'usage: %prog [options] <header.h>'
op = optparse.OptionParser(usage=usage)
op.add_option('-o', '--output', dest='output',
help='write wrapper to FILE', metavar='FILE')
op.add_option('-l', '--library', dest='library',
help='link to LIBRARY', metavar='LIBRARY')
op.add_option('-D', '--define', dest='defines', default=[],
help='define token NAME=VALUE', action='append')
op.add_option('-i', '--include-file', action='append', dest='include_files',
                  help='assume FILE is included', metavar='FILE',
default=[])
op.add_option('-I', '--include-dir', action='append', dest='include_dirs',
help='add DIR to include search path', metavar='DIR',
default=[])
op.add_option('-m', '--link-module', action='append', dest='link_modules',
help='use symbols from MODULE', metavar='MODULE',
default=[])
op.add_option('-a', '--all-headers', action='store_true',
dest='all_headers',
help='include symbols from all headers', default=False)
(options, args) = op.parse_args(list(argv[1:]))
if len(args) < 1:
print >> sys.stderr, 'No header files specified.'
sys.exit(1)
headers = args
if options.library is None:
options.library = os.path.splitext(headers[0])[0]
if options.output is None:
options.output = '%s.py' % options.library
wrapper = CtypesWrapper()
wrapper.begin_output(open(options.output, 'w'),
library=options.library,
emit_filenames=headers,
link_modules=options.link_modules,
all_headers=options.all_headers)
wrapper.preprocessor_parser.include_path += options.include_dirs
for define in options.defines:
name, value = define.split('=')
wrapper.preprocessor_parser.define(name, value)
for file in options.include_files:
wrapper.wrap(file)
for header in headers:
wrapper.wrap(header)
wrapper.end_output()
print 'Wrapped to %s' % options.output
if __name__ == '__main__':
main(*sys.argv)
|
shaileshgoogler/pyglet
|
tools/wraptypes/wrap.py
|
Python
|
bsd-3-clause
| 9,533
|
[
"VisIt"
] |
60ec3d2e4cb2e98c2330ff16e252b608f1850d79264897b01502451c7b5a031f
|
import datetime
import unittest
from eobs import Eobs
tg = Eobs(r'c:\m\ghs\data\e-obs\tg_0.25deg_reg_v12.0.nc')
# Paris coordinates and corresponding indices obtained from manual lookup in
# version 12 of the "tg" netCDF:
lat = 48.66
lon = 2.35
lat_idx = 93
lon_idx = 171
class TestEobs(unittest.TestCase):
def test_lat2index(self):
lat_index = tg.lat2index(lat)
self.assertEqual(lat_index, lat_idx)
# Check that the latitude referred to by this index represents the
# grid square that the original latitude falls within
lat2 = tg.variables['latitude'][lat_index]
self.assertTrue(lat2 - tg.gridsize <= lat <= lat2 + tg.gridsize)
def test_lon2index(self):
lon_index = tg.lon2index(lon)
self.assertEqual(lon_index, lon_idx)
        # Check that the longitude referred to by this index represents the
        # grid square that the original longitude falls within
lon2 = tg.variables['longitude'][lon_index]
self.assertTrue(lon2 - tg.gridsize <= lon <= lon2 + tg.gridsize)
def test_getitem_with_date_lat_lon(self):
t = tg[datetime.datetime(1950, 1, 1), lat, lon]
# Using lat-lon indices that have been looked up
self.assertEqual(t, tg.variable[0, lat_idx, lon_idx])
|
woodcrafty/eobs
|
tests/test_eobs.py
|
Python
|
bsd-3-clause
| 1,285
|
[
"NetCDF"
] |
fb4bf271995a3f15f8998feb3ff28a65dbf0ca8aa85a8e42b28c888bba096b0f
|
#!/usr/bin/env python
print "HANDLING IMPORTS..."
import sys
import os
import time
import operator
import math
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import interpolate
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
import itertools
import pickle
import theano
import theano.tensor as T
from lasagne import random as lasagne_random
from lasagne import layers as l
from lasagne import nonlinearities
from lasagne import init
from lasagne import objectives
from lasagne import updates
from lasagne import regularization
from utils import batch_generator as bg
print "...DONE!"
sys.setrecursionlimit(10000)
######################## CONFIG #########################
#Fixed random seed
RANDOM_SEED = 1337
RANDOM = np.random.RandomState(RANDOM_SEED)
lasagne_random.set_rng(RANDOM)
#Dataset params
DATASET_PATH = 'dataset/train/spec/'
MIN_SAMPLES_PER_CLASS = -1
MAX_SAMPLES_PER_CLASS = None
SORT_CLASSES_ALPHABETICALLY = True
VAL_SPLIT = 0.1
USE_CACHE = False
#Multi-Label Params
MULTI_LABEL = False
VAL_HAS_MULTI_LABEL = False
MEAN_TARGETS_PER_IMAGE = 3
#Image params
IM_SIZE = (512, 256) #(width, height)
IM_DIM = 1
IM_AUGMENTATION = {#'type':[probability, value]
'roll':[0.5, (0.0, 0.05)],
#'noise':[0.1, 0.01],
#'brightness':[0.5, (0.25, 1.25)],
#'crop':[0.5, 0.07],
#'flip': [0.25, 1]
}
#General model params
DROPOUT = 0.5
NONLINEARITY = nonlinearities.rectify
INIT_GAIN = math.sqrt(2)
#Training params
BATCH_SIZE = 32
LEARNING_RATE = {0:0.001, 55:0.000001} #epoch:lr
LR_DESCENT = True
L2_WEIGHT = 0 #1e-4
OPTIMIZER='adam' #'adam' or 'nesterov'
EPOCHS = 55
RANDOMIZE_TRAIN_SET = True
#Confusion matrix params
CONFMATRIX_MAX_CLASSES = 20
NORMALIZE_CONFMATRIX = True
#Model import/export params
MODEL_PATH = 'model/'
PRETRAINED_MODEL = None #'pretrained_model.pkl'
LOAD_OUTPUT_LAYER = True
EPOCH_START = 1
RUN_NAME = 'Example_Run'
SIMPLE_LOG_MODE = True
SNAPSHOT_EPOCHS = [10, 20, 30, 40, 50] #[-1] saves after every epoch
SAVE_AFTER_INTERRUPT = True
################### DATASET HANDLING ####################
def parseDataset():
#we use subfolders as class labels
classes = [folder for folder in sorted(os.listdir(DATASET_PATH))]
if not SORT_CLASSES_ALPHABETICALLY:
classes = shuffle(classes, random_state=RANDOM)
#now we enlist all image paths for each class
images = []
tclasses = []
sample_count = {}
for c in classes:
c_images = [os.path.join(DATASET_PATH, c, path) for path in os.listdir(os.path.join(DATASET_PATH, c))][:MAX_SAMPLES_PER_CLASS]
sample_count[c] = len(c_images)
images += c_images
#Do we want to correct class imbalance?
#This will affect validation scores as we use some samples in TRAIN and VAL
while sample_count[c] < MIN_SAMPLES_PER_CLASS:
images += [c_images[RANDOM.randint(0, len(c_images))]]
sample_count[c] += 1
#shuffle image paths
images = shuffle(images, random_state=RANDOM)
#validation split
vsplit = int(len(images) * VAL_SPLIT)
train = images[:-vsplit]
val = images[-vsplit:]
#show classes if needed for testing
#print classes
#show some stats
print "CLASSES:", len(classes)
print "CLASS LABELS:", sorted(sample_count.items(), key=operator.itemgetter(1))
print "TRAINING IMAGES:", len(train)
print "VALIDATION IMAGES:", len(val)
return classes, train, val
#parse dataset
CLASSES, TRAIN, VAL = parseDataset()
NUM_CLASSES = len(CLASSES)
#################### BATCH HANDLING #####################
CACHE = {}
def openImage(path, useCache=USE_CACHE):
global CACHE
#using a dict {path:image} cache saves some time after first epoch
#but may consume a lot of RAM
if path in CACHE:
return CACHE[path]
else:
#open image
img = cv2.imread(path)
#DEBUG
try:
h, w = img.shape[:2]
except:
print "IMAGE NONE-TYPE:", path
#original image dimensions
try:
h, w, d = img.shape
#to gray?
if IM_DIM == 1:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
except:
h, w = img.shape
#to color?
if IM_DIM == 3:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
#resize to conv input size
img = cv2.resize(img, (IM_SIZE[0], IM_SIZE[1]))
#convert to floats between 0 and 1
img = np.asarray(img / 255., dtype='float32')
if useCache:
CACHE[path] = img
return img
def imageAugmentation(img):
AUG = IM_AUGMENTATION
#Random Crop (without padding)
if 'crop' in AUG and RANDOM.choice([True, False], p=[AUG['crop'][0], 1 - AUG['crop'][0]]):
h, w = img.shape[:2]
cropw = RANDOM.randint(1, int(float(w) * AUG['crop'][1]))
croph = RANDOM.randint(1, int(float(h) * AUG['crop'][1]))
img = img[croph:-croph, cropw:-cropw]
img = cv2.resize(img, (IM_SIZE[0], IM_SIZE[1]))
#Flip - 1 = Horizontal, 0 = Vertical
if 'flip' in AUG and RANDOM.choice([True, False], p=[AUG['flip'][0], 1 - AUG['flip'][0]]):
img = cv2.flip(img, AUG['flip'][1])
#Wrap shift (roll up/down and left/right)
if 'roll' in AUG and RANDOM.choice([True, False], p=[AUG['roll'][0], 1 - AUG['roll'][0]]):
img = np.roll(img, int(img.shape[0] * (RANDOM.uniform(-AUG['roll'][1][1], AUG['roll'][1][1]))), axis=0)
img = np.roll(img, int(img.shape[1] * (RANDOM.uniform(-AUG['roll'][1][0], AUG['roll'][1][0]))), axis=1)
    #subtract/add mean
if 'mean' in AUG and RANDOM.choice([True, False], p=[AUG['mean'][0], 1 - AUG['mean'][0]]):
img += np.mean(img) * AUG['mean'][1]
#gaussian noise
if 'noise' in AUG and RANDOM.choice([True, False], p=[AUG['noise'][0], 1 - AUG['noise'][0]]):
img += RANDOM.normal(0.0, RANDOM.uniform(0, AUG['noise'][1]**0.5), img.shape)
img = np.clip(img, 0.0, 1.0)
#adjust brightness
if 'brightness' in AUG and RANDOM.choice([True, False], p=[AUG['brightness'][0], 1 - AUG['brightness'][0]]):
img *= RANDOM.uniform(AUG['brightness'][1][0], AUG['brightness'][1][1])
img = np.clip(img, 0.0, 1.0)
#show
#cv2.imshow("AUG", img)#.reshape(IM_SIZE[1], IM_SIZE[0], IM_DIM))
#cv2.waitKey(-1)
return img
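#Hedged helper, not called anywhere in the training flow (name and usage are
#illustrative): preview a few augmented versions of one sample to eyeball the
#augmentation settings above. Requires a display.
def previewAugmentation(path, n=4):
    for k in range(n):
        img = imageAugmentation(openImage(path, useCache=False))
        cv2.imshow("AUG PREVIEW " + str(k), img)
    cv2.waitKey(-1)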
def loadImageAndTarget(path, doAugmentation=True):
#here we open the image
img = openImage(path)
#image augmentation?
    if IM_AUGMENTATION is not None and doAugmentation:
img = imageAugmentation(img)
#we want to use subfolders as class labels
label = path.split("/")[-2]
#we need to get the index of our label from CLASSES
index = CLASSES.index(label)
#allocate array for target
target = np.zeros((NUM_CLASSES), dtype='float32')
#we set our target array = 1.0 at our label index, all other entries remain 0.0
target[index] = 1.0
#transpose image if dim=3
try:
img = np.transpose(img, (2, 0, 1))
except:
pass
#we need a 4D-vector for our image and a 2D-vector for our targets
img = img.reshape(-1, IM_DIM, IM_SIZE[1], IM_SIZE[0])
target = target.reshape(-1, NUM_CLASSES)
return img, target
def getAugmentedBatches(x, y):
#augment batch until desired number of target labels per image is reached
while np.mean(np.sum(y, axis=1)) < MEAN_TARGETS_PER_IMAGE:
        #get two images to combine (we avoid i == j, which could cause an
        #infinite loop, by excluding it from the sampling ranges)
i = RANDOM.choice(range(1, x.shape[0] - 1))
j = RANDOM.choice(range(0, i) + range(i + 1, x.shape[0]))
#add images
x[i] += x[j]
#re-normalize new image
x[i] -= x[i].min(axis=None)
x[i] /= x[i].max(axis=None)
#combine targets (makes this task a multi-label classification!)
y[i] = np.logical_or(y[i], y[j])
#TODO: We still might end up in an infinite loop
#and should add a break in case something is fishy
#show
#cv2.imshow("BA", x[i].reshape(IM_SIZE[1], IM_SIZE[0], IM_DIM))
#cv2.waitKey(-1)
return x, y
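#Illustration of the target combination above: logical_or on two one-hot
#targets yields a {0, 1} multi-label vector, e.g.
#np.logical_or([0, 1, 0], [1, 0, 0]) -> [True, True, False],
#which numpy casts back to float32 on assignment into y.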
def getDatasetChunk(split):
#get batch-sized chunks of image paths
for i in xrange(0, len(split), BATCH_SIZE):
yield split[i:i+BATCH_SIZE]
def getNextImageBatch(split=TRAIN, doAugmentation=True, batchAugmentation=MULTI_LABEL):
#fill batch
for chunk in getDatasetChunk(split):
#allocate numpy arrays for image data and targets
x_b = np.zeros((BATCH_SIZE, IM_DIM, IM_SIZE[1], IM_SIZE[0]), dtype='float32')
y_b = np.zeros((BATCH_SIZE, NUM_CLASSES), dtype='float32')
ib = 0
for path in chunk:
try:
#load image data and class label from path
x, y = loadImageAndTarget(path, doAugmentation)
#pack into batch array
x_b[ib] = x
y_b[ib] = y
ib += 1
except:
continue
#trim to actual size
x_b = x_b[:ib]
y_b = y_b[:ib]
#batch augmentation?
if batchAugmentation and x_b.shape[0] >= BATCH_SIZE // 2:
x_b, y_b = getAugmentedBatches(x_b, y_b)
#instead of return, we use yield
yield x_b, y_b
################## BUILDING THE MODEL ###################
def buildModel():
print "BUILDING MODEL TYPE..."
#default settings
filters = 64
first_stride = 2
last_filter_multiplier = 16
#input layer
net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))
#conv layers
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)
#dense layers
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.DropoutLayer(net, DROPOUT)
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.DropoutLayer(net, DROPOUT)
#Classification Layer
if MULTI_LABEL:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
else:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))
print "...DONE!"
#model stats
print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
print "MODEL HAS", l.count_params(net), "PARAMS"
return net
NET = buildModel()
################## MODEL SAVE/LOAD ####################
BEST_MODEL = None
BEST_EPOCH = 0
def saveModel(epoch, model=None):
print "EXPORTING MODEL...",
    if model is None:
model = NET
net_filename = MODEL_PATH + "AED_" + RUN_NAME + "_model_epoch_" + str(epoch) + ".pkl"
if not os.path.exists(MODEL_PATH):
os.makedirs(MODEL_PATH)
with open(net_filename, 'w') as f:
#We want to save the model architecture with all params and trained classes
data = {'net': model, 'classes':CLASSES, 'run_name': RUN_NAME, 'epoch':epoch, 'im_size':IM_SIZE, 'im_dim':IM_DIM}
pickle.dump(data, f)
print "DONE!"
def loadModel(filename):
print "IMPORTING MODEL PARAMS...",
net_filename = MODEL_PATH + filename
with open(net_filename, 'rb') as f:
data = pickle.load(f)
#for training, we only want to load the model params
net = data['net']
params = l.get_all_param_values(net)
if LOAD_OUTPUT_LAYER:
l.set_all_param_values(NET, params)
else:
l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
print "DONE!"
if PRETRAINED_MODEL is not None:
loadModel(PRETRAINED_MODEL)
#################### LOSS FUNCTION ######################
def calc_loss(prediction, targets):
#categorical crossentropy is the best choice for a multi-class softmax output
loss = T.mean(objectives.categorical_crossentropy(prediction, targets))
return loss
def calc_loss_multi(prediction, targets):
#we need to clip predictions when calculating the log-loss
prediction = T.clip(prediction, 0.0000001, 0.9999999)
#binary crossentropy is the best choice for a multi-class sigmoid output
loss = T.mean(objectives.binary_crossentropy(prediction, targets))
return loss
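#Why the clipping above matters (illustrative numbers): binary crossentropy
#evaluates log(p) and log(1 - p), so an exact 0.0 or 1.0 prediction would
#yield inf; with the clip, the worst case is -log(1e-7) ~= 16.1.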
#theano variable for the class targets
targets = T.matrix('targets', dtype=theano.config.floatX)
#get the network output
prediction = l.get_output(NET)
#we use L2 Norm for regularization
l2_reg = regularization.regularize_layer_params(NET, regularization.l2) * L2_WEIGHT
#calculate the loss
if MULTI_LABEL:
loss = calc_loss_multi(prediction, targets) + l2_reg
else:
loss = calc_loss(prediction, targets) + l2_reg
################# ACCURACY FUNCTION #####################
def calc_accuracy(prediction, targets):
#we can use the lasagne objective categorical_accuracy to determine the top1 single label accuracy
a = T.mean(objectives.categorical_accuracy(prediction, targets, top_k=1))
return a
def calc_accuracy_multi(prediction, targets):
#we can use the lasagne objective binary_accuracy to determine the multi label accuracy
a = T.mean(objectives.binary_accuracy(prediction, targets))
return a
#calculate accuracy
if MULTI_LABEL and VAL_HAS_MULTI_LABEL:
accuracy = calc_accuracy_multi(prediction, targets)
else:
accuracy = calc_accuracy(prediction, targets)
####################### UPDATES #########################
#we use dynamic learning rates which change after some epochs
lr_dynamic = T.scalar(name='learning_rate')
#get all trainable parameters (weights) of our net
params = l.get_all_params(NET, trainable=True)
#we use the adam update
if OPTIMIZER == 'adam':
param_updates = updates.adam(loss, params, learning_rate=lr_dynamic, beta1=0.5)
elif OPTIMIZER == 'nesterov':
param_updates = updates.nesterov_momentum(loss, params, learning_rate=lr_dynamic, momentum=0.9)
#################### TRAIN FUNCTION ######################
#the theano train functions takes images and class targets as input
print "COMPILING THEANO TRAIN FUNCTION...",
start = time.time()
train_net = theano.function([l.get_all_layers(NET)[0].input_var, targets, lr_dynamic], loss, updates=param_updates)
print "DONE! (", int(time.time() - start), "s )"
################# PREDICTION FUNCTION ####################
#we need the prediction function to calculate the validation accuracy
#this way we can test the net during/after training
net_output = l.get_output(NET, deterministic=True)
print "COMPILING THEANO TEST FUNCTION...",
start = time.time()
test_net = theano.function([l.get_all_layers(NET)[0].input_var, targets], [net_output, loss, accuracy])
print "DONE! (", int(time.time() - start), "s )"
################## CONFUSION MATRIX #####################
cmatrix = []
def clearConfusionMatrix():
global cmatrix
#allocate empty matrix
cmatrix = np.zeros((NUM_CLASSES, NUM_CLASSES), dtype='int32')
def updateConfusionMatrix(p, t):
global cmatrix
#get class indices for prediction and target
targets = np.argmax(t, axis=1)
predictions = np.argmax(p, axis=1)
#add up confusion matrices of validation batches
cmatrix += confusion_matrix(targets, predictions, labels=range(0, NUM_CLASSES))
def showConfusionMatrix(epoch):
#new figure
plt.figure(0, figsize=(35, 35), dpi=72)
plt.clf()
#get additional metrics
pr, re, f1 = calculateMetrics()
#normalize?
if NORMALIZE_CONFMATRIX:
global cmatrix
cmatrix = np.around(cmatrix.astype('float') / cmatrix.sum(axis=1)[:, np.newaxis] * 100.0, decimals=1)
#show matrix
plt.imshow(cmatrix[:CONFMATRIX_MAX_CLASSES, :CONFMATRIX_MAX_CLASSES], interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion Matrix\n' +
RUN_NAME + ' - Epoch ' + str(epoch) +
'\nTrain Samples: ' + str(len(TRAIN)) + ' Validation Samples: ' + str(len(VAL)) +
'\nmP: ' + str(np.mean(pr)) + ' mF1: ' + str( np.mean(f1)), fontsize=32)
#tick marks
tick_marks = np.arange(min(CONFMATRIX_MAX_CLASSES, NUM_CLASSES))
plt.xticks(tick_marks, CLASSES[:CONFMATRIX_MAX_CLASSES], rotation=90)
plt.yticks(tick_marks, CLASSES[:CONFMATRIX_MAX_CLASSES])
#labels
thresh = cmatrix.max() / 2.
for i, j in itertools.product(range(min(CONFMATRIX_MAX_CLASSES, cmatrix.shape[0])), range(min(CONFMATRIX_MAX_CLASSES, cmatrix.shape[1]))):
plt.text(j, i, cmatrix[i, j],
horizontalalignment="center", verticalalignment="center",
color="white" if cmatrix[i, j] > thresh else "black", fontsize=32)
#axes labels
plt.tight_layout()
plt.ylabel('Target label', fontsize=32)
plt.xlabel('Predicted label', fontsize=32)
#fontsize
plt.rc('font', size=32)
#save plot
global cmcnt
if not os.path.exists('confmatrix'):
os.makedirs('confmatrix')
plt.savefig('confmatrix/' + RUN_NAME + '_' + str(epoch) + '.png')
def calculateMetrics():
#allocate arrays
pr = []
re = []
f1 = []
#parse rows and columns of confusion matrix
for i in range(0, cmatrix.shape[0]):
        #true positives, false positives, false negatives
        tp = float(cmatrix[i][i])
        #rows of the confusion matrix are targets, columns are predictions,
        #so the row sum gives tp+fn and the column sum gives tp+fp
        fn = float(np.sum(cmatrix, axis=1)[i] - tp)
        fp = float(np.sum(cmatrix, axis=0)[i] - tp)
#precision
if tp > 0 or fp > 0:
p = tp / (tp + fp)
else:
p = 0
pr.append(p)
#recall
if tp > 0 or fn > 0:
r = tp / (tp + fn)
else:
r = 0
re.append(r)
#f1 measure
if p > 0 or r > 0:
f = 2 * ((p * r) / (p + r))
else:
f = 0
f1.append(f)
return pr, re, f1
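#Worked example for the metrics above (illustrative 2-class matrix):
#cmatrix = [[8, 2], [1, 9]] with rows = targets, cols = predictions gives
#class 0: tp=8, fn=2, fp=1 -> precision 8/9 ~= 0.89, recall 8/10 = 0.8,
#f1 = 2*(0.89*0.8)/(0.89+0.8) ~= 0.84.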
###################### PROGRESS #########################
batches_per_epoch = len(TRAIN + VAL) // BATCH_SIZE + 1
avg_duration = []
last_update = -1
def showProgress(stat, duration, current, end=batches_per_epoch, update_interval=5, simple_mode=False):
#epochs might take a lot of time, so we want some kind of progress bar
#this approach is not very sophisticated, but it does the job :)
#you should use simple_mode=True if run with IDLE and simple_mode=False if run on command line
global avg_duration
global last_update
#time left
avg_duration.append(duration)
avg_duration = avg_duration[-10:]
r = int(abs(end - current) * np.mean(avg_duration) / 60) + 1
#percentage
p = int(current / float(end) * 100)
progress = ""
for s in xrange(update_interval, 100, update_interval):
if s <= p:
progress += "="
else:
progress += " "
#status line
if p > last_update and p % update_interval == 0 or last_update == -1:
if simple_mode:
if current == 1:
print stat.upper() + ": [",
else:
print "=",
if current == end:
print "]",
else:
print stat.upper() + ": [" + progress + "] BATCHES " + str(current) + "/" + str(end) + " (" + str(p) + "%) - " + str(r) + " min REMAINING\r",
last_update = p
###################### TRAINING #########################
print "START TRAINING..."
train_loss = []
val_loss = []
val_accuracy = []
max_acc = -1
lr = LEARNING_RATE[LEARNING_RATE.keys()[0]]
SAVE_MODEL_AFTER_TRAINING = True
#train for some epochs...
for epoch in range(EPOCH_START, EPOCHS + 1):
try:
#start timer
start = time.time()
#reset confusion matrix
clearConfusionMatrix()
#adjust learning rate (interpolate or steps)
if LR_DESCENT:
lr_keys = np.array(LEARNING_RATE.keys() + [EPOCHS], dtype='float32')
lr_values = np.array(LEARNING_RATE.values() + [LEARNING_RATE.values()[-1]], dtype='float32')
lr_func = interpolate.interp1d(lr_keys, lr_values, kind='linear')
lr = np.float32(lr_func(max(LEARNING_RATE.keys()[0], epoch - 1)))
else:
if epoch in LEARNING_RATE:
lr = LEARNING_RATE[epoch]
#shuffle dataset (this way we get "new" batches every epoch)
if RANDOMIZE_TRAIN_SET:
TRAIN = shuffle(TRAIN, random_state=RANDOM)
#time
bstart = time.time()
last_update = -1
#iterate over train split batches and calculate mean loss for epoch
t_l = []
bcnt = 0
for image_batch, target_batch in bg.threadedBatchGenerator(getNextImageBatch()):
            #calling the training function returns the current loss
loss = train_net(image_batch, target_batch, lr)
t_l.append(loss)
bcnt += 1
#show progress
showProgress("EPOCH " + str(epoch), (time.time() - bstart), bcnt, simple_mode=SIMPLE_LOG_MODE)
bstart = time.time()
#we validate our net every epoch and pass our validation split through as well
v_l = []
v_a = []
for image_batch, target_batch in bg.threadedBatchGenerator(getNextImageBatch(VAL, False, VAL_HAS_MULTI_LABEL)):
#calling the test function returns the net output, loss and accuracy
prediction_batch, loss, acc = test_net(image_batch, target_batch)
v_l.append(loss)
v_a.append(acc)
            #save predictions and targets for confusion matrix
updateConfusionMatrix(prediction_batch, target_batch)
bcnt += 1
#show progress
showProgress("EPOCH " + str(epoch), (time.time() - bstart), bcnt, simple_mode=SIMPLE_LOG_MODE)
bstart = time.time()
#stop timer
end = time.time()
#calculate stats for epoch
train_loss.append(np.mean(t_l))
val_loss.append(np.mean(v_l))
val_accuracy.append(np.mean(v_a))
#print stats for epoch
print "TRAIN LOSS:", train_loss[-1],
print "VAL LOSS:", val_loss[-1],
print "VAL ACCURACY:", (int(val_accuracy[-1] * 1000) / 10.0), "%",
print "LR:", lr,
print "TIME:", (int((end - start) * 10) / 10.0), "s"
#log max accuracy and save best params
acc = (int(val_accuracy[-1] * 1000) / 10.0)
if acc >= max_acc:
max_acc = acc
BEST_MODEL = NET
BEST_EPOCH = epoch
#show confusion matrix
showConfusionMatrix(epoch)
#save snapshot?
if epoch in SNAPSHOT_EPOCHS or SNAPSHOT_EPOCHS[0] == -1:
saveModel(epoch)
except KeyboardInterrupt:
SAVE_MODEL_AFTER_TRAINING = SAVE_AFTER_INTERRUPT
break
print "TRAINING DONE!"
print "MAX ACC: ", max_acc
#save best model params
if SAVE_MODEL_AFTER_TRAINING:
saveModel(BEST_EPOCH, BEST_MODEL)
|
kahst/AcousticEventDetection
|
AED_train.py
|
Python
|
mit
| 24,412
|
[
"Gaussian"
] |
9ffc25df7ffe1e42b9f3f8f4821ae286d96c47ac3edb4f98c4332ba6017b9344
|
"""
process delegator service
Must be executable by
python -m pycsp.parallel.server
Used to handle the creation of external processes (sshprocess / clusterprocess)
Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>,
Brian Vinter <vinter@nbi.dk>, Rune M. Friborg <rune.m.friborg@gmail.com>.
See LICENSE.txt for licensing details (MIT License).
"""
import sys
import os
# Extract parameters
cwd, ip, port, input_name = sys.argv[1:]
# Change to working dir
os.chdir(cwd)
# Load pycsp modules
from pycsp.parallel.clusterprocess import NodePlacement
from pycsp.parallel.process import init
from pycsp.parallel import *
# Init main process
init()
# Connect to channel
input_chan = Channel(input_name, connect=(ip, int(port)))
# Get channel ends
input_chan_end = input_chan.reader()
# PyCSP MultiProcess (Spawn)
def RunFunc(output_chan_name, fn, args, kwargs):
output_chan = Channel(output_chan_name, connect=(ip, int(port)))
send_return_value = output_chan.writer()
val = Parallel(Process(fn, *args, **kwargs))
send_return_value(val[0])
try:
while True:
        # Retrieve args, kwargs
val = input_chan_end()
if len(val) == 7:
h, p, output_chan_name, scriptPath, funcName, args, kwargs = val
else:
# For clusterprocesses
h, p, output_chan_name, scriptPath, funcName, args, kwargs, available_nodes = val
if available_nodes:
nodefile, group_state = available_nodes
NodePlacement().set_nodegroup(nodefile, group_state)
# Load script
sys.path.insert(0, os.path.dirname(scriptPath))
moduleName = os.path.basename(scriptPath)
if moduleName[-3:] == '.py':
moduleName = moduleName[:-3]
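        # e.g. 'myscript.py' -> 'myscript'; __import__ expects a module name
        # on sys.path, not a file name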
m = __import__(moduleName)
# Get function by
# 1. retrieve process factory function.
# 2. generate process from process factory
# 3. Fetch function from generated process
fn = getattr(m, funcName)().fn
# Spawn a runner
Spawn(MultiProcess(RunFunc, output_chan_name, fn, args, kwargs), pycsp_host=h, pycsp_port=p)
except ChannelPoisonException:
pass
# The rest of process clean up is done in shutdown
shutdown()
|
runefriborg/pycsp
|
pycsp/parallel/server.py
|
Python
|
mit
| 2,298
|
[
"Brian"
] |
307984056a3e8f8e37eafa7d5782a2c434ca9dc22bf3ecd88c9c39cd6f3d1618
|
import math
import numpy as np
import quaternion
import spinsfast
import spherical_functions as sf
import scri
import pytest
ABD = scri.AsymptoticBondiData
abd = scri.asymptotic_bondi_data
def construct_and_validate(modifier, validator, ell_max=8):
time = np.linspace(-100, 100, num=2001)
sigma, sigmadot, sigmaddot, psi2, psi1, psi0 = np.zeros((6, sf.LM_total_size(0, ell_max)), dtype=complex)
modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0)
abd = ABD.from_initial_values(time, ell_max, sigma, sigmadot, sigmaddot, psi2, psi1, psi0)
validator(abd)
return True
def check_modes(modes, nonzero_ℓm):
import numpy as np
import spherical_functions as sf
non_zero_indices = np.array([modes.index(ℓ, m) for ℓ, m in nonzero_ℓm], dtype=int)
zero_indices = np.array(list(set(range(sf.LM_total_size(0, modes.ell_max))) - set(non_zero_indices)), dtype=int)
assert not np.any(modes[..., zero_indices]), f"nonzero values among indices {zero_indices}"
for non_zero_index in non_zero_indices:
assert np.any(modes[..., non_zero_index]), f"no nonzero values at index {non_zero_index}"
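# For reference (assuming the standard spherical_functions layout with
# ell_min = 0): index(ℓ, m) = ℓ*(ℓ+1) + m, so (0,0) -> 0, (1,-1) -> 1,
# (1,0) -> 2, (1,1) -> 3, (2,-2) -> 4, ...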
def test0():
"""Set only terms that are forbidden by spin weights; ensure all zeros"""
def modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0):
psi0[: sf.LM_index(1, 1, 0)] = 1.234
psi1[0] = 0.123
sigma[: sf.LM_index(1, 1, 0)] = 0.567
sigmadot[: sf.LM_index(1, 1, 0)] = 0.678
sigmaddot[: sf.LM_index(1, 1, 0)] = 0.789
def validator(abd):
check_modes(abd.psi0, [])
check_modes(abd.psi1, [])
check_modes(abd.psi2, [])
check_modes(abd.psi3, [])
check_modes(abd.psi4, [])
check_modes(abd.sigma, [])
assert np.max(np.abs(abd.bondi_violation_norms)) == 0.0
construct_and_validate(modifier, validator)
def test1():
"""Add ℓ=0 term to ψ₂ intial value
Ensures that first terms of ψ̇₁u = ðψ₂ + 2σψ₃ and ψ̇₀ = ðψ₁ + 3σψ₂ don't get excited
"""
def modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0):
# Nonsensical values that should have no effect
psi0[: sf.LM_index(1, 1, 0)] = 1.234
psi1[0] = 0.123
sigma[: sf.LM_index(1, 1, 0)] = 0.567
sigmadot[: sf.LM_index(1, 1, 0)] = 0.678
sigmaddot[: sf.LM_index(1, 1, 0)] = 0.789
# Actual values that should carry through
psi2[sf.LM_index(0, 0, 0)] = 0.234
def validator(abd):
assert np.all(abd.psi2[..., 0] == 0.234)
check_modes(abd.psi0, [])
check_modes(abd.psi1, [])
check_modes(abd.psi2, [[0, 0]])
check_modes(abd.psi3, [])
check_modes(abd.psi4, [])
check_modes(abd.sigma, [])
assert np.max(np.abs(abd.bondi_violation_norms)) == 0.0
construct_and_validate(modifier, validator, ell_max=3)
def test2():
"""Add ℓ=1 term to ψ₂ intial value
Checks first term of ψ̇₁ = ðψ₂ + 2σψ₃
"""
def modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0):
# Nonsensical values that should have no effect
psi0[: sf.LM_index(1, 1, 0)] = 1.234
psi1[0] = 0.123
sigma[: sf.LM_index(1, 1, 0)] = 0.567
sigmadot[: sf.LM_index(1, 1, 0)] = 0.678
sigmaddot[: sf.LM_index(1, 1, 0)] = 0.789
# Actual values that should carry through
psi2[sf.LM_index(0, 0, 0)] = 0.234
psi2[sf.LM_index(1, -1, 0)] = 0.345
def validator(abd):
check_modes(abd.psi0, [])
check_modes(abd.psi1, [[1, -1], [1, 1]])
check_modes(abd.psi2, [[0, 0], [1, -1], [1, 1]])
check_modes(abd.psi3, [])
check_modes(abd.psi4, [])
check_modes(abd.sigma, [])
assert np.max(np.abs(abd.bondi_violation_norms)) < 1e-13
construct_and_validate(modifier, validator, ell_max=4)
def test3():
"""Add ℓ=2 term to ψ₂ intial value
Checks first term of ψ̇₀ = ðψ₁ + 3σψ₂
"""
def modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0):
# Nonsensical values that should have no effect
psi0[: sf.LM_index(1, 1, 0)] = 1.234
psi1[0] = 0.123
sigma[: sf.LM_index(1, 1, 0)] = 0.567
sigmadot[: sf.LM_index(1, 1, 0)] = 0.678
sigmaddot[: sf.LM_index(1, 1, 0)] = 0.789
# Actual values that should carry through
psi2[sf.LM_index(0, 0, 0)] = 0.234
psi2[sf.LM_index(1, -1, 0)] = 0.345
psi2[sf.LM_index(2, -2, 0)] = 0.456
def validator(abd):
assert np.all(abd.psi2[..., 0] == 0.234)
check_modes(abd.psi0, [[2, -2], [2, 2]])
check_modes(abd.psi1, [[1, -1], [1, 1], [2, -2], [2, 2]])
check_modes(abd.psi2, [[0, 0], [1, -1], [1, 1], [2, -2], [2, 2]])
check_modes(abd.psi3, [])
check_modes(abd.psi4, [])
check_modes(abd.sigma, [])
assert np.max(np.abs(abd.bondi_violation_norms)) < 4e-11
construct_and_validate(modifier, validator, ell_max=4)
def test4():
"""Add nonzero constant term to σ
Checks first term of ψ̇₁ = ðψ₂ + 2σψ₃ and second term of ψ̇₀ = ðψ₁ + 3σψ₂
After satisfaction of the reality condition on the mass aspect, ψ₂ has nonzero modes in
{(0, 0), (2, -2), (2, 2)}, so ψ₁ should have nonzero modes in {(2, -2), (2, 2)}. Here,
σ has only the nonzero mode (2, 2). Thus the product σψ₂ should result in nonzero
modes in {(2, -2), (2, 0), (2, 2), (3, 0), (4, 0), (4, 4)}.
"""
def modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0):
# Nonsensical values that should have no effect
psi0[: sf.LM_index(1, 1, 0)] = 1.234
psi1[0] = 0.123
sigma[: sf.LM_index(1, 1, 0)] = 0.567
sigmadot[: sf.LM_index(1, 1, 0)] = 0.678
sigmaddot[: sf.LM_index(1, 1, 0)] = 0.789
# Actual values that should carry through
psi2[0] = 0.234
sigma[sf.LM_index(2, 2, 0)] = 0.5678
def validator(abd):
check_modes(abd.psi0, [[2, -2], [2, 0], [2, 2], [3, 0], [4, 0], [4, 4]])
check_modes(abd.psi1, [[2, -2], [2, 2]])
check_modes(abd.psi2, [[0, 0], [2, -2], [2, 2]])
check_modes(abd.psi3, [])
check_modes(abd.psi4, [])
check_modes(abd.sigma, [[2, 2]])
assert np.max(np.abs(abd.bondi_violation_norms)) <= 2e-10
construct_and_validate(modifier, validator, ell_max=6)
def test5():
"""Add nonzero ℓ=2 term to σ̇
Checks second term of ψ̇₁ = ðψ₂ + 2σψ₃
"""
def modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0):
# Nonsensical values that should have no effect
psi0[: sf.LM_index(1, 1, 0)] = 1.234
psi1[0] = 0.123
sigma[: sf.LM_index(1, 1, 0)] = 0.567
sigmadot[: sf.LM_index(1, 1, 0)] = 0.678
sigmaddot[: sf.LM_index(1, 1, 0)] = 0.789
# Actual values that should carry through
sigma[sf.LM_index(2, 2, 0)] = 0.5678
sigmadot[sf.LM_index(2, 2, 0)] = 0.6789
def validator(abd):
check_modes(abd.psi0, [[2, -2], [2, 0], [2, 2], [3, 0], [4, 0], [4, 4]])
check_modes(abd.psi1, [[1, 0], [2, -2], [2, 0], [2, 2], [3, 0], [4, 0]])
check_modes(abd.psi2, [[2, -2], [2, 2]])
check_modes(abd.psi3, [[2, -2]])
check_modes(abd.psi4, [])
check_modes(abd.sigma, [[2, 2]])
assert np.max(np.abs(abd.bondi_violation_norms)) <= 7e-9
construct_and_validate(modifier, validator, ell_max=6)
def test6():
"""Add nonzero ℓ=2 term to σ̈
Checks second term of ψ̇₂ = ðψ₃ + σψ₄
"""
def modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0):
# Nonsensical values that should have no effect
psi0[: sf.LM_index(1, 1, 0)] = 1.234
psi1[0] = 0.123
sigma[: sf.LM_index(1, 1, 0)] = 0.567
sigmadot[: sf.LM_index(1, 1, 0)] = 0.678
sigmaddot[: sf.LM_index(1, 1, 0)] = 0.789
# Actual values that should carry through
sigmaddot[sf.LM_index(2, 2, 0)] = 0.1 / 10_000 ** 2 # 10_000 = max(time)**2
def validator(abd):
check_modes(abd.psi0, [[2, -2], [2, 0], [2, 2], [3, 0], [3, 2], [4, 0], [4, 2], [5, 2], [6, 2]])
check_modes(abd.psi1, [[1, 0], [2, -2], [2, 0], [3, 0], [4, 0]])
check_modes(abd.psi2, [[0, 0], [1, 0], [2, -2], [2, 0], [3, 0], [4, 0]])
check_modes(abd.psi3, [[2, -2]])
check_modes(abd.psi4, [[2, -2]])
check_modes(abd.sigma, [[2, 2]])
assert np.max(np.abs(abd.bondi_violation_norms)) <= 5e-8
construct_and_validate(modifier, validator, ell_max=7)
def test7():
"""Test random values for all ℓ modes"""
ell_max = 8
np.random.seed(1234)
def modifier(sigma, sigmadot, sigmaddot, psi2, psi1, psi0):
sigma[:] = 0.01 * np.random.rand(*(sigma.shape[:-1] + (sigma.shape[-1] * 2,))).view(complex)
sigmadot[:] = (0.01 / 100) * np.random.rand(*(sigma.shape[:-1] + (sigma.shape[-1] * 2,))).view(complex)
sigmaddot[:] = (0.01 / 100 ** 2) * np.random.rand(*(sigma.shape[:-1] + (sigma.shape[-1] * 2,))).view(complex)
def validator(abd):
check_modes(abd.psi0, sf.LM_range(abs(abd.psi0.s), ell_max))
check_modes(abd.psi1, sf.LM_range(abs(abd.psi1.s), ell_max))
check_modes(abd.psi2, sf.LM_range(abs(abd.psi2.s), ell_max))
check_modes(abd.psi3, sf.LM_range(2, ell_max))
check_modes(abd.psi4, sf.LM_range(abs(abd.psi4.s), ell_max))
check_modes(abd.sigma, sf.LM_range(abs(abd.sigma.s), ell_max))
assert np.max(np.abs(abd.bondi_violation_norms)) <= 4.5e-6
construct_and_validate(modifier, validator, ell_max=ell_max)
|
moble/scri
|
tests/test_abd_ivp.py
|
Python
|
mit
| 9,661
|
[
"Psi4"
] |
d9ebd13dfc9e170be2079ef573d37282616057dc805162a73805c9fcc6b2b0f9
|
import sys
from sympy import symbols,sin,cos
from printer import Format,xpdf,Get_Program,Print_Function
from ga import Ga
def Maxwells_Equations_in_Geom_Calculus():
Print_Function()
X = symbols('t x y z',real=True)
(st4d,g0,g1,g2,g3) = Ga.build('gamma*t|x|y|z',g=[1,-1,-1,-1],coords=X)
I = st4d.i
B = st4d.mv('B','vector',f=True)
E = st4d.mv('E','vector',f=True)
B.set_coef(1,0,0)
E.set_coef(1,0,0)
B *= g0
E *= g0
J = st4d.mv('J','vector',f=True)
F = E+I*B
print r'\text{Pseudo Scalar\;\;}I =',I
print '\\text{Magnetic Field Bi-Vector\\;\\;} B = \\bm{B\\gamma_{t}} =',B
print '\\text{Electric Field Bi-Vector\\;\\;} E = \\bm{E\\gamma_{t}} =',E
print '\\text{Electromagnetic Field Bi-Vector\\;\\;} F = E+IB =',F
print '%\\text{Four Current Density\\;\\;} J =',J
gradF = st4d.grad*F
    print '#Geom Derivative of Electromagnetic Field Bi-Vector'
gradF.Fmt(3,'grad*F')
print '#Maxwell Equations'
print 'grad*F = J'
print '#Div $E$ and Curl $H$ Equations'
(gradF.get_grade(1)-J).Fmt(3,'%\\grade{\\nabla F}_{1} -J = 0')
print '#Curl $E$ and Div $B$ equations'
(gradF.get_grade(3)).Fmt(3,'%\\grade{\\nabla F}_{3} = 0')
return
def Dirac_Equation_in_Geom_Calculus():
Print_Function()
coords = symbols('t x y z',real=True)
(st4d,g0,g1,g2,g3) = Ga.build('gamma*t|x|y|z',g=[1,-1,-1,-1],coords=coords)
I = st4d.i
(m,e) = symbols('m e')
psi = st4d.mv('psi','spinor',f=True)
A = st4d.mv('A','vector',f=True)
sig_z = g3*g0
print '\\text{4-Vector Potential\\;\\;}\\bm{A} =',A
print '\\text{8-component real spinor\\;\\;}\\bm{\\psi} =',psi
dirac_eq = (st4d.grad*psi)*I*sig_z-e*A*psi-m*psi*g0
dirac_eq = dirac_eq.simplify()
dirac_eq.Fmt(3,r'%\text{Dirac Equation\;\;}\nabla \bm{\psi} I \sigma_{z}-e\bm{A}\bm{\psi}-m\bm{\psi}\gamma_{t} = 0')
return
def Lorentz_Transformation_in_Geom_Algebra():
Print_Function()
(alpha,beta,gamma) = symbols('alpha beta gamma')
(x,t,xp,tp) = symbols("x t x' t'",real=True)
(st2d,g0,g1) = Ga.build('gamma*t|x',g=[1,-1])
from sympy import sinh,cosh
R = cosh(alpha/2)+sinh(alpha/2)*(g0^g1)
X = t*g0+x*g1
Xp = tp*g0+xp*g1
print 'R =',R
print r"#%t\bm{\gamma_{t}}+x\bm{\gamma_{x}} = t'\bm{\gamma'_{t}}+x'\bm{\gamma'_{x}} = R\lp t'\bm{\gamma_{t}}+x'\bm{\gamma_{x}}\rp R^{\dagger}"
Xpp = R*Xp*R.rev()
Xpp = Xpp.collect()
Xpp = Xpp.trigsimp()
print r"%t\bm{\gamma_{t}}+x\bm{\gamma_{x}} =",Xpp
Xpp = Xpp.subs({sinh(alpha):gamma*beta,cosh(alpha):gamma})
print r'%\f{\sinh}{\alpha} = \gamma\beta'
print r'%\f{\cosh}{\alpha} = \gamma'
print r"%t\bm{\gamma_{t}}+x\bm{\gamma_{x}} =",Xpp.collect()
return
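# With the substitutions above, gamma = 1/sqrt(1 - beta**2), so Xpp
# reproduces the standard boost t = gamma*(t' + beta*x'),
# x = gamma*(x' + beta*t') (up to the sign convention chosen for alpha).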
def Lie_Group():
Print_Function()
coords = symbols('t x y z',real=True)
(st4d,g0,g1,g2,g3) = Ga.build('gamma*t|x|y|z',g=[1,-1,-1,-1],coords=coords)
I = st4d.i
a = st4d.mv('a','vector')
B = st4d.mv('B','bivector')
print 'a =',a
print 'B =',B
print 'a|B =', a|B
print ((a|B)|B).simplify().Fmt(3,'(a|B)|B')
print (((a|B)|B)|B).simplify().Fmt(3,'((a|B)|B)|B')
return
def dummy():
return
def main():
Get_Program()
Format()
#Maxwells_Equations_in_Geom_Calculus()
#Dirac_Equation_in_Geom_Calculus()
    #Lorentz_Transformation_in_Geom_Algebra()
Lie_Group()
xpdf()
return
if __name__ == "__main__":
main()
|
rschwiebert/galgebra
|
examples/LaTeX/physics_check_latex.py
|
Python
|
bsd-3-clause
| 3,449
|
[
"DIRAC"
] |
21062c483c6fcf99526dabbda45eac8e1d0f609e890cf9fb06529aa1a228c3c2
|
import psi4
raw_data = """
BMK, -76.3467785966000037
MGGA_MS1, -76.3862847739000017
B1LYP, -76.3535264398000066
HCTH93, -76.3690726046999941
MPW1K, -76.3552293636000030
MGGA_MS2h, -76.3869717052999988
TPSSh, -76.3808997507999976
PBE0, -76.3004989885999976
LRC-wPBEh, -76.3099424376000002
B3PW91, -76.3571064930000034
wB97, -76.3673106896000036
BOP, -76.3668799865999972
B3P86, -76.4016411790999967
M11, -76.3543894538999979
SCAN, -76.3522497757000025
GAM, -76.3503638435999932
M08-HX, -76.3491939590000044
wB97X, -76.3624999407000047
wB97X-D3, -76.3623321139999973
B5050LYP, -76.3332203819999933
t-HCTH, -76.3696114174999963
M06, -76.3505221948999946
SOGGA, -75.9787679797999971
BLYP, -76.3664511366000056
X3LYP, -76.3549666532999964
M05-2X, -76.3677366188999969
O3LYP, -76.3591519817999966
MGGA_MVSh, -76.3353050983999992
B1B95, -76.3535218972999985
M05, -76.3485782220000004
PKZB, -76.2406589717000003
B97-3, -76.3648903124999947
B97-D, -76.3454776499000047
TPSS, -76.3892897844000061
SOGGA11, -76.3720443428999971
BB1K, -76.3467152802000015
SCAN0, -76.3379029626999994
M06-2X, -76.3484434676000063
PBEOP, -76.3127794731999956
MGGA_MS2, -76.3944046976999971
MN12-L, -76.3108543350000019
N12-SX, -76.3348361572000016
dlDF, -76.8411033690999972
LRC-BOP, -76.2292395779000032
BP86, -76.3866054652000059
HCTH120, -76.3758099710999971
EDF1, -76.3977353147000002
M08-SO, -76.3326735788999997
MGGA_MVS, -76.3421991937999991
revPBE0, -76.3463887599999964
M06-L, -76.3766412209999999
t-HCTHh, -76.3687407290999971
HCTH407, -76.3749332321999930
CAM-B3LYP, -76.3552395513000022
MPW1PW91, -76.3632704638999940
MPW1B95, -76.3499762600999929
HCTH147, -76.3796800707000045
M06-HF, -76.3434823813000065
B3LYP5, -76.3479655987000001
PW91, -76.3566898132999938
wM05-D, -76.3552889303999933
B3LYP, -76.3850629237000049
PBE, -76.2994454149000063
MPWB1K, -76.3440612003000041
B97-K, -76.3525019631999982
B97, -76.3580555827000040
B1PW91, -76.3655336964999947
VSXC, -76.4029233123000040
BHHLYP, -76.3412697403999942
BP86VWN, -76.3908805485999949
EDF2, -76.3447647211999936
B97-2, -76.3583905196999950
wM06-D3, -76.3595346591999942
MPW1PBE, -76.3392282126999930
B97-1, -76.3597077255000016
M11-L, -76.3618412049000028
LRC-wPBE, -76.3232922044000048
PWB6K, -76.4078451296000054
PW6B95, -76.4628530150999950
MN15-L, -76.3001793932999988
revTPSSh, -76.3534596164000021
SOGGA11-X, -76.3569679431000026
MN12-SX, -76.3286481310999960
wB97X-D, -76.3616399546999958
MPW1LYP, -76.3513127878999995
PBE50, -76.3022385245999999
MGGA_MS0, -76.3991111682999957
N12, -76.3300096080999992
""".splitlines()
qchem_data = {}
for n, l in enumerate(raw_data):
if not len(l): continue
s, e = l.split(',')
s = s.strip().upper()
qchem_data[s] = float(e)
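# Illustrative sanity check on the parsed reference data (the bounds are an
# assumption): every value should sit near the water/6-31G ground-state energy.
assert all(-77.5 < v < -75.5 for v in qchem_data.values()), \
    "unexpected reference energy parsed from raw_data"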
psi_data = {'wblyp': -76.3825262028083, 'svwn': -76.01452450629559, 'b97-0':
-76.35806217660348, 'b97-1': -76.35971389422123, 'b97-2': -76.3583977125692,
'pbe0': -76.30050128362014, 'pw91': -76.3565672871516, 'b2plyp':
-76.29245491066388, 'bp86': -76.38654294632472, 'wpbe2': -76.29220261052656,
'wpbe0': -76.31862941307556, 'wb97x': -76.36530303256407, 'sogga':
-75.97862074697434, 'wsvwn': -76.14201931003304, 'hcth120': -76.37582142613623,
'ft97': float('NaN'), 'wpbe': -76.33419490671916, 'wb97': -76.36737258172461, 'hcth147':
-76.37969122520059, 'dldf+d': -76.84110282525018, 'blyp': -76.36645993768678,
'hcth': -76.36908457666645, 'hcth407': -76.37494441125874, 'wb97x-d':
-76.36166004385153, 'b86bpbe': -76.36414509170982, 'm05': -76.34856555284622,
'dldf': -76.84110282525016, 'b97-d': -76.34549159283519, 'pw86pbe':
-76.42095771366442, 'm05-2x': -76.36772387978453, 'wpbesol': -76.0621570060994,
'pbe': -76.29945413106864, 'hf_x': -75.98014193516957, 'pbe0-2':
-76.21332533493145, 'wpbesol0': -76.1033355799836, 'wpbe_c':
-67.60822454944358, 'dldf+d09': -76.84110282525018,
'b3lyp5': -76.34796919664903, 'b3lyp': -76.38506652122132}
psi_data = {k.upper() : v for k, v in psi_data.items()}
mol = psi4.geometry("""
0 1
O
H 1 1.0
H 1 1.0 2 104.5
""")
psi4.set_options({"GUESS": "CORE",
"BASIS": "6-31G",
"dft_spherical_points": 1202,
"dft_radial_points": 400,
"dft_nuclear_scheme": "NAIVE"})
psi4.core.set_output_file("output.dat", False)
keys = list(set(list(psi_data)) | set(list(qchem_data)))
#keys = [x for x in keys if "LYP" in x]
keys = ["WPBE"]
keys.sort()
print(" Functional: libxc : rob qchem")
for key in keys:
energy = 0
try:
energy = psi4.energy(key, molecule=mol)
except:
#print("Functional %s not found.. skipping" % key)
continue
qchem_energy = float('NaN')
psi_energy = float('NaN')
if key in list(qchem_data):
qchem_energy = qchem_data[key]
if key in list(psi_data):
psi_energy = psi_data[key]
psi_diff = abs(energy - psi_energy)
qchem_diff = abs(energy - qchem_energy)
spot = ""
if (psi_diff > 1.e-6) or (qchem_diff > 1.e-4):
spot = "*"
qchem_energy = float('NaN')
print("Comparison %10s: %16.10f : %8.2e %8.2e %s" % (key, energy, psi_diff, qchem_diff, spot))
|
rmcgibbo/psi4public
|
tests/libxc/devl/fine_input.py
|
Python
|
lgpl-3.0
| 5,599
|
[
"Psi4"
] |
82e89d034ed5b683df30f7a8590bc7355fdb000e4088490571814e32276ed58c
|
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.shared_randomstreams import RandomStreams
import numpy as np
from toolbox import *
from modelbase import *
import itertools
class FFN_ace(ModelSLBase):
"""
Auto-classifier-encoder (Georgiev, 2015)
"""
def save(self):
if not os.path.exists('savedmodels\\'):
os.makedirs('savedmodels\\')
self.params.save(self.filename)
def __init__(self, data, hp):
super(FFN_ace, self).__init__(self.__class__.__name__, data, hp)
# batch_size: 10000; learning_rate = 0.0015; lr_halflife = 200, 500
self.epsilon = 0.0001
self.params = Parameters()
self.shared_vars = Parameters()
n_x = self.data['n_x']
n_y = self.data['n_y']
n_h1 = 1200
n_h2 = 1000
n_h3 = 800
n_h4 = 800
scale = hp.init_scale
if hp.load_model and os.path.isfile(self.filename):
self.params.load(self.filename)
else:
with self.params:
w_h = shared_normal((n_x, n_h1), scale=scale)
b_h = shared_zeros((n_h1,))
w_h2 = shared_normal((n_h1, n_h2), scale=scale)
b_h2 = shared_zeros((n_h2,))
w_h3 = shared_normal((n_h2, n_h3), scale=scale)
b_h3 = shared_zeros((n_h3,))
w_h4 = shared_normal((n_h3, n_h4), scale=scale)
b_h4 = shared_zeros((n_h4,))
w_o = shared_normal((n_h4, n_y), scale=scale)
def batch_norm(h):
m = T.mean(h, axis=0, keepdims=True)
std = T.sqrt(T.var(h, axis=0, keepdims=True) + self.epsilon)
h = (h - m) / std
return h
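        # NumPy equivalent of the normalization above, for reference:
        # (h - h.mean(axis=0)) / np.sqrt(h.var(axis=0) + epsilon)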
def model(X, params, p_drop_input, p_drop_hidden):
X_noise = X + gaussian(X.shape, p_drop_input)
h = batch_norm(dropout(rectify(T.dot(X_noise, params.w_h) + params.b_h), p_drop_hidden))
# Dual reconstruction error
phx = T.nnet.sigmoid(T.dot(h, T.dot(h.T, X_noise)) / self.hp.batch_size)
log_phx = T.nnet.binary_crossentropy(phx, X_noise).sum()
h2 = dropout(rectify(T.dot(h, params.w_h2) + params.b_h2), p_drop_hidden)
h3 = batch_norm(dropout(rectify(T.dot(h2, params.w_h3) + params.b_h3), p_drop_hidden))
h4 = dropout(rectify(T.dot(h3, params.w_h4) + params.b_h4), p_drop_hidden)
py_x = softmax(T.dot(h4, params.w_o))
return [py_x, log_phx]
noise_py_x, cost_recon = model(self.X, self.params, 0.2, 0.5)
cost_y2 = -T.sum(self.Y * T.log(noise_py_x))
cost = cost_y2 + cost_recon
pyx, _ = model(self.X, self.params, 0., 0.)
map_pyx = T.argmax(pyx, axis=1)
error_map_pyx = T.sum(T.neq(map_pyx, T.argmax(self.Y, axis=1)))
self.compile(cost, error_map_pyx)
|
mrgloom/Theano-Lights
|
models/ffn_ace.py
|
Python
|
mit
| 3,046
|
[
"Gaussian"
] |
b4fd9f69cb849f23814070e3b6735115421609620cd4f424ee442d7b6028c9b9
|
from ase.calculators.singlepoint import SinglePointCalculator as SPC
from ase.constraints import dict2constraint
from ase.io import write
from ase import Atoms
import numpy as np
import json
supported_properties = ['energy', 'forces', 'stress', 'magmoms', 'magmom']
def array_to_list(data):
"""A function to covert all arrays in a structure of
embeded dictionaries and lists into lists themselves.
"""
if isinstance(data, list):
for i, v in enumerate(data):
if isinstance(v, np.ndarray):
data[i] = v.tolist()
elif isinstance(v, dict):
array_to_list(v)
elif isinstance(v, list):
array_to_list(v)
elif isinstance(data, dict):
for k, v in list(data.items()):
if isinstance(v, np.ndarray):
data[k] = v.tolist()
elif isinstance(v, dict):
array_to_list(v)
elif isinstance(v, list):
array_to_list(v)
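# Usage sketch (illustrative): array_to_list mutates its argument in place.
# d = {'a': np.zeros(2), 'b': [np.ones(1)]}
# array_to_list(d)  # -> d == {'a': [0.0, 0.0], 'b': [[1.0]]}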
def encode_to_atoms(encode, out_file='input.traj'):
"""Dump the encoding to a local traj file."""
# First, decode the trajectory
data = json.loads(encode, encoding='utf-8')
# Construct the initial atoms object
atoms = Atoms(
data['numbers'],
data['trajectory']['0']['positions'],
cell=data['trajectory']['0']['cell'],
pbc=data['pbc'])
atoms.info['calculator_parameters'] = data['calculator_parameters']
atoms.set_constraint([dict2constraint(_) for _ in data['constraints']])
initial_magmoms = data.get('initial_magmoms')
if initial_magmoms:
atoms.set_initial_magnetic_moments(initial_magmoms)
# Attach the calculator
results = {'atoms': atoms}
for prop in supported_properties:
results.update({prop: data['trajectory']['0'].get(prop)})
calc = SPC(**results)
atoms.set_calculator(calc)
# Collect the rest of the trajectory information
images = [atoms]
for i in range(len(data['trajectory']))[1:]:
atoms = atoms.copy()
if data['trajectory'][str(i)]['cell']:
atoms.set_cell(data['trajectory'][str(i)]['cell'])
if data['trajectory'][str(i)]['positions']:
atoms.set_positions(data['trajectory'][str(i)]['positions'])
results = {'atoms': atoms}
for prop in supported_properties:
results.update({prop: data['trajectory'][str(i)].get(prop)})
calc = SPC(**results)
atoms.set_calculator(calc)
images += [atoms]
# Write the traj file
if out_file:
write(out_file, images)
return images
def atoms_to_encode(images):
"""Converts an list of atoms objects to an encoding
from a .traj file.
"""
if not isinstance(images, list):
images = [images]
# Convert all constraints into dictionary format
constraints = [_.todict() for _ in images[0].constraints]
for i, C in enumerate(constraints):
# Turn any arrays in the kwargs into lists
for k, v in list(C['kwargs'].items()):
if isinstance(v, np.ndarray):
constraints[i]['kwargs'][k] = v.tolist()
# Convert any arrays from the parameter settings into lists
keys = images[0].info['calculator_parameters']
array_to_list(keys)
data = {'trajectory': {}}
# Assemble the compressed dictionary of results
for i, atoms in enumerate(images):
if i == 0:
            # For the first image, collect cell and positions normally
pos = atoms.get_positions()
update_pos = pos
cell = atoms.get_cell()
update_cell = cell
# Add the parameters which do not change
data['numbers'] = images[0].get_atomic_numbers().tolist()
data['pbc'] = images[0].get_pbc().tolist()
data['constraints'] = constraints
data['calculator_parameters'] = keys
initial_magmoms = atoms.arrays.get('initial_magmoms')
if initial_magmoms is not None:
data['initial_magmoms'] = list(initial_magmoms)
else:
            # For consecutive images, check for duplication
            # If a duplicate is found, do not store it again
if np.array_equal(atoms.get_positions(), pos):
update_pos = np.array([])
else:
pos = atoms.get_positions()
update_pos = pos
if np.array_equal(atoms.get_cell(), cell):
update_cell = np.array([])
else:
cell = atoms.get_cell()
update_cell = cell
results = {'positions': update_pos, 'cell': update_cell}
if atoms._calc:
for prop in supported_properties:
results.update({prop: atoms._calc.results.get(prop)})
for k, v in results.items():
if isinstance(v, np.ndarray):
results[k] = v.tolist()
# Store trajectory, throwing out None values
data['trajectory'][i] = {
k: v for k, v in list(
results.items()) if v is not None}
# Return the reduced results in JSON compression
encoding = json.dumps(data)
return encoding
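# Round-trip sketch (illustrative; assumes each image carries
# info['calculator_parameters'] and an attached calculator):
# encoding = atoms_to_encode(images)
# images2 = encode_to_atoms(encoding, out_file=None)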
|
jboes/CatKit
|
catkit/flow/fwio.py
|
Python
|
gpl-3.0
| 5,218
|
[
"ASE"
] |
2f71116cf88bd51370f14e34b938379208a46f04cfd999d1a106fce106665eaa
|
import unittest
from django.utils import html
class TestUtilsHtml(unittest.TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
            ('&','&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#39;'),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
        self.check_output(f, '<&', '&lt;&amp;')
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('<f', '<f'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_fix_ampersands(self):
f = html.fix_ampersands
# Strings without ampersands or with ampersands already encoded.
values = ("a", "b", "&a;", "& &x; ", "asdf")
patterns = (
("%s", "%s"),
("&%s", "&%s"),
("&%s&", "&%s&"),
)
for value in values:
for in_pattern, out_pattern in patterns:
self.check_output(f, in_pattern % value, out_pattern % value)
# Strings with ampersands that need encoding.
items = (
("&#;", "&#;"),
("ͫ ;", "&#875 ;"),
("abc;", "&#4abc;"),
)
for value, output in items:
self.check_output(f, value, output)
def test_escapejs(self):
f = html.escapejs
items = (
(u'"double quotes" and \'single quotes\'', u'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(ur'\ : backslashes, too', u'\\u005C : backslashes, too'),
(u'and lots of whitespace: \r\n\t\v\f\b', u'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(ur'<script>and this</script>', u'\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
(u'paragraph separator:\u2029and line separator:\u2028', u'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
def test_clean_html(self):
f = html.clean_html
items = (
(u'<p>I <i>believe</i> in <b>semantic markup</b>!</p>', u'<p>I <em>believe</em> in <strong>semantic markup</strong>!</p>'),
            (u'I escape & I don\'t <a href="#" target="_blank">target</a>', u'I escape &amp; I don\'t <a href="#" >target</a>'),
(u'<p>I kill whitespace</p><br clear="all"><p> </p>', u'<p>I kill whitespace</p>'),
            # also a regression test for #7267: this used to raise a UnicodeDecodeError
(u'<p>* foo</p><p>* bar</p>', u'<ul>\n<li> foo</li><li> bar</li>\n</ul>'),
)
for value, output in items:
self.check_output(f, value, output)
|
LethusTI/supportcenter
|
vendor/django/tests/regressiontests/utils/html.py
|
Python
|
gpl-3.0
| 5,467
|
[
"ADF"
] |
57452abf3d79da46b8b808106112e77cc16937e18329a1c125ab2047d440990d
|
# -*- coding: utf-8 -*-
r"""
.. _test-Laguerre-Gaussian:
Tests of Kirchhoff integral with various Gaussian beams
-------------------------------------------------------
Find the test module laguerre_gaussian_beam.py, as well as several other tests
for the raycing backend, in ``\tests\raycing``.
.. note::
In this section we consider z-axis to be directed along the beamline in
order to be compatible with the formulas in their standard notations.
Elsewhere in xrt z-axis looks upwards.
Gaussian beam
~~~~~~~~~~~~~
Gaussian beam at the waist (at :math:`z=0`) is defined as
.. math::
u_0(r) = \sqrt{\frac2\pi}\frac1{w_0}\exp\left(-\frac{r^2}{w_0^2}\right),
where the pre-exponent factor provides normalization:
:math:`\int{|u|^2dS} = 2\pi\int_0^\infty {|u|^2 rdr} = 1`.
Define
:math:`z_R \buildrel \text{def}\over = \frac{\pi w_0^2}\lambda`,
:math:`R \buildrel \text{def}\over = z\left(1+\frac{z_R^2}{z^2}\right)`,
:math:`w \buildrel \text{def}\over = w_0\sqrt{1+\frac{z^2}{z_R^2}}`.
Gaussian beam at arbitrary z is expressed as:
.. math::
u(r, z) = \sqrt{\frac2\pi}\frac1{w_0}\frac1{1-i\frac{z}{z_R}}\exp
\left(-\frac{r^2}{w^2}-\frac{i\pi r^2}{\lambda R}\right),
where the exponent can also be rewritten as: :math:`\exp\left(-\frac{r^2}
{w_0^2(1-i\frac{z}{z_R})}\right)`. The pre-exponent factor can also be factored
as :math:`\frac1{1-i\frac{z}{z_R}} = \frac{w_0}w\exp(i\psi)` with :math:`\psi=
\arctan{\frac{z}{z_R}}=\frac{1}{2i}\log{\frac{1+i\frac{z}{z_R}}{1-i\frac{z}
{z_R}}}`.
:math:`U = u(r, z)\exp\left(-i(kz-\omega t)\right)` satisfies the wave equation
.. math::
\nabla^2U = \frac{1}{c^2}\frac{\partial^2U}{\partial t^2}.
:math:`u` can also be obtained by integrating the Gaussian beam waist in a
diffraction integral (in our implementation, the Kirchhoff integral):
.. math::
u(x, y, z) = -\frac{ik}{4\pi} \iint_{-\infty}^\infty u_0(x', y')
K(x, y, x', y') dx'dy'.
The table below compares Kirchhoff diffraction integrals of a Gaussian waist
with analytical solutions. The coloring is by wave phase. Notice equal shape
and unity total flux. The Gaussian waist was calculated as GaussianBeam with
:math:`w_0` = 15 µm:
+------------------------+
| Gaussian waist (z=0) |
+========================+
| |g00m| |
+------------------------+
.. |g00m| imagezoom:: _images/Gauss-0-beamFSMg-at00m.png
.. note::
The resulting unity flux is *not* obtained by an ad hoc normalization after
the diffraction. This flux is obtained from the diffraction field as is,
which demonstrates the correctness of the field amplitude in our
implementation of the Kirchhoff integral.
+-------+--------------------------+---------------------------------+
| | analytical Gaussian beam | numerical Kirchhoff diffraction |
+=======+==========================+=================================+
| z=5m | |g05m| | |k05m| |
+-------+--------------------------+---------------------------------+
| z=10m | |g10m| | |k10m| |
+-------+--------------------------+---------------------------------+
| z=20m | |g20m| | |k20m| |
+-------+--------------------------+---------------------------------+
| z=40m | |g40m| | |k40m| |
+-------+--------------------------+---------------------------------+
| z=80m | |g80m| | |k80m| |
+-------+--------------------------+---------------------------------+
.. |g05m| imagezoom:: _images/Gauss-1-beamFSMg-at05m.png
.. |k05m| imagezoom:: _images/Gauss-1-beamFSMk-at05m.png
:loc: upper-right-corner
.. |g10m| imagezoom:: _images/Gauss-2-beamFSMg-at10m.png
.. |k10m| imagezoom:: _images/Gauss-2-beamFSMk-at10m.png
:loc: upper-right-corner
.. |g20m| imagezoom:: _images/Gauss-3-beamFSMg-at20m.png
.. |k20m| imagezoom:: _images/Gauss-3-beamFSMk-at20m.png
:loc: upper-right-corner
.. |g40m| imagezoom:: _images/Gauss-4-beamFSMg-at40m.png
.. |k40m| imagezoom:: _images/Gauss-4-beamFSMk-at40m.png
:loc: upper-right-corner
.. |g80m| imagezoom:: _images/Gauss-5-beamFSMg-at80m.png
.. |k80m| imagezoom:: _images/Gauss-5-beamFSMk-at80m.png
:loc: upper-right-corner
Laguerre-Gaussian beam
~~~~~~~~~~~~~~~~~~~~~~
Vortex beams are given by Laguerre-Gaussian modes:
.. math::
u^l_p(r, \phi, z) = u(r, z) \sqrt{\frac{p!}{(p+|l|)!}}
\left(\frac{r\sqrt{2}}{w}\right)^{|l|}
L^{|l|}_p\left(\frac{2r^2}{w^2}\right)
\exp\left(i(|l|+2p)\psi\right)\exp(il\phi).
The flux is again normalized to unity.
The table below compares Kirchhoff diffraction integrals of a Laguerre-Gaussian
waist with analytical solutions. The coloring is by wave phase. Notice equal
shape and unity total flux. The Laguerre-Gaussian waist was calculated as
LaguerreGaussianBeam with :math:`w_0` = 15 µm, :math:`l` = 1 and :math:`p` = 1:
+---------------------------------+
| Laguerre-Gaussian waist (z=0) |
+=================================+
| |lg00m| |
+---------------------------------+
.. |lg00m| imagezoom:: _images/Laguerre-Gauss-0-beamFSMg-at00m.png
.. note::
The resulting unity flux is *not* obtained by an ad hoc normalization after
the diffraction. This flux is obtained from the diffraction field as is,
which demonstrates the correctness of the field amplitude in our
implementation of the Kirchhoff integral.
+-------+-----------------------------------+---------------------------------+
| | analytical Laguerre-Gaussian beam | numerical Kirchhoff diffraction |
+=======+===================================+=================================+
| z=5m | |lg05m| | |lk05m| |
+-------+-----------------------------------+---------------------------------+
| z=10m | |lg10m| | |lk10m| |
+-------+-----------------------------------+---------------------------------+
| z=20m | |lg20m| | |lk20m| |
+-------+-----------------------------------+---------------------------------+
| z=40m | |lg40m| | |lk40m| |
+-------+-----------------------------------+---------------------------------+
| z=80m | |lg80m| | |lk80m| |
+-------+-----------------------------------+---------------------------------+
.. |lg05m| imagezoom:: _images/Laguerre-Gauss-1-beamFSMg-at05m.png
.. |lk05m| imagezoom:: _images/Laguerre-Gauss-1-beamFSMk-at05m.png
:loc: upper-right-corner
.. |lg10m| imagezoom:: _images/Laguerre-Gauss-2-beamFSMg-at10m.png
.. |lk10m| imagezoom:: _images/Laguerre-Gauss-2-beamFSMk-at10m.png
:loc: upper-right-corner
.. |lg20m| imagezoom:: _images/Laguerre-Gauss-3-beamFSMg-at20m.png
.. |lk20m| imagezoom:: _images/Laguerre-Gauss-3-beamFSMk-at20m.png
:loc: upper-right-corner
.. |lg40m| imagezoom:: _images/Laguerre-Gauss-4-beamFSMg-at40m.png
.. |lk40m| imagezoom:: _images/Laguerre-Gauss-4-beamFSMk-at40m.png
:loc: upper-right-corner
.. |lg80m| imagezoom:: _images/Laguerre-Gauss-5-beamFSMg-at80m.png
:loc: lower-left-corner
.. |lk80m| imagezoom:: _images/Laguerre-Gauss-5-beamFSMk-at80m.png
:loc: lower-right-corner
Hermite-Gaussian beam
~~~~~~~~~~~~~~~~~~~~~
Higher order modes in rectangular coordinates are given by Hermite-Gaussian
modes:
.. math::
u_{mn}(x, y, z) = u(r, z) \frac{1}{\sqrt{2^{m+n}m!n!}}
H_m\left(\frac{\sqrt2x}{w}\right) H_n\left(\frac{\sqrt2y}{w}\right)
\exp\left(i(m+n)\psi\right),
where :math:`r^2 = x^2 + y^2`. The flux is again normalized to unity.
The table below compares Kirchhoff diffraction integrals of a Hermite-Gaussian
waist with analytical solutions. The coloring is by wave phase. Notice equal
shape and unity total flux. The Hermite-Gaussian waist was calculated as
HermiteGaussianBeam with :math:`w_0` = 15 µm, :math:`m` = 3 and :math:`n` = 2:
+---------------------------------+
| Hermite-Gaussian waist (z=0) |
+=================================+
| |hg00m| |
+---------------------------------+
.. |hg00m| imagezoom:: _images/Hermite-Gauss-0-beamFSMg-at00m.png
.. note::
The resulting unity flux is *not* obtained by an ad hoc normalization after
the diffraction. This flux is obtained from the diffraction field as is,
which demonstrates the correctness of the field amplitude in our
implementation of the Kirchhoff integral.
+-------+-----------------------------------+---------------------------------+
| | analytical Hermite-Gaussian beam | numerical Kirchhoff diffraction |
+=======+===================================+=================================+
| z=5m | |hg05m| | |hk05m| |
+-------+-----------------------------------+---------------------------------+
| z=10m | |hg10m| | |hk10m| |
+-------+-----------------------------------+---------------------------------+
| z=20m | |hg20m| | |hk20m| |
+-------+-----------------------------------+---------------------------------+
| z=40m | |hg40m| | |hk40m| |
+-------+-----------------------------------+---------------------------------+
| z=80m | |hg80m| | |hk80m| |
+-------+-----------------------------------+---------------------------------+
.. |hg05m| imagezoom:: _images/Hermite-Gauss-1-beamFSMg-at05m.png
.. |hk05m| imagezoom:: _images/Hermite-Gauss-1-beamFSMk-at05m.png
:loc: upper-right-corner
.. |hg10m| imagezoom:: _images/Hermite-Gauss-2-beamFSMg-at10m.png
.. |hk10m| imagezoom:: _images/Hermite-Gauss-2-beamFSMk-at10m.png
:loc: upper-right-corner
.. |hg20m| imagezoom:: _images/Hermite-Gauss-3-beamFSMg-at20m.png
.. |hk20m| imagezoom:: _images/Hermite-Gauss-3-beamFSMk-at20m.png
:loc: upper-right-corner
.. |hg40m| imagezoom:: _images/Hermite-Gauss-4-beamFSMg-at40m.png
.. |hk40m| imagezoom:: _images/Hermite-Gauss-4-beamFSMk-at40m.png
:loc: upper-right-corner
.. |hg80m| imagezoom:: _images/Hermite-Gauss-5-beamFSMg-at80m.png
:loc: lower-left-corner
.. |hk80m| imagezoom:: _images/Hermite-Gauss-5-beamFSMk-at80m.png
:loc: lower-right-corner
"""
__author__ = "Konstantin Klementiev"
__date__ = "28 May 2016"
import os, sys; sys.path.append(os.path.join('..', '..')) # analysis:ignore
import numpy as np
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
import xrt.backends.raycing.run as rr
import xrt.backends.raycing.screens as rsc
import xrt.backends.raycing.waves as rw
import xrt.plotter as xrtp
xrtp.colorFactor = 1.
import xrt.runner as xrtr
case = 2
wantKirchhoff = True
if case == 0:
prefix = 'Gauss'
maxFactor = 2. # factor that determines the screen limits as ±w*maxFactor
elif case == 1:
prefix = 'Laguerre-Gauss'
lVortex, pVortex = 1, 1
maxFactor = 2*(abs(lVortex)+pVortex+1)**0.25
elif case == 2:
prefix = 'Hermite-Gauss'
m, n = 3, 2
maxFactor = 2 * (m+n)**0.25
else:
raise ValueError("unknown case")
E0 = 9000. # eV
w0 = 15e-3 # mm, waist size of the amplitude (not of intensity!)
# w0 = 30e-3, 15e-3 # mm, waist sizes for elliptical Gaussian beam
# screen positions:
if True: # short test
ps = np.array([0, 0.5, 1, 2, 4, 8]) * 10000.
else: # long test
ps = np.array(list(range(10)) + list(range(1, 11)) +
list(range(20, 101, 10))) * 1000.
ps[0:10] /= 10.
#print(ps)
bins, ppb = 256, 1
def build_beamline():
beamLine = raycing.BeamLine(height=0)
if case == 0:
beamLine.source = rs.GaussianBeam(
beamLine, prefix, w0=w0, energies=(E0,))
elif case == 1:
beamLine.source = rs.LaguerreGaussianBeam(
beamLine, prefix, w0=w0, vortex=(lVortex, pVortex), energies=(E0,))
elif case == 2:
beamLine.source = rs.HermiteGaussianBeam(
beamLine, prefix, w0=w0, TEM=(m, n), energies=(E0,))
beamLine.fsmFar = rsc.Screen(beamLine, 'FSM', [0, 0, 0])
return beamLine
def run_process(beamLine):
outDict = {}
for ip, (p, (x, z)) in enumerate(zip(ps, beamLine.fsmXZmeshes)):
print('screen position {0} of {1}'.format(ip+1, len(ps)))
beamLine.fsmFar.center[1] = p
waveOnFSMg = beamLine.fsmFar.prepare_wave(beamLine.source, x, z)
beamLine.source.shine(wave=waveOnFSMg)
# mult = np.exp(0.5j * waveOnFSMg.x / x.max())
# waveOnFSMg.Es *= mult
# waveOnFSMg.Ep *= mult
# mult = np.exp(0.5j * waveOnFSMg.z / z.max())
# waveOnFSMg.Es *= mult
# waveOnFSMg.Ep *= mult
if outDict == {}:
beamSource = waveOnFSMg
what = 'beamFSMg{0}'.format(ip)
outDict[what] = waveOnFSMg
if p > 100 and wantKirchhoff:
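            # numerically propagate the stored waist field (beamSource, the
            # z=0 screen) to this screen with the Kirchhoff diffraction integral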
wrepeats = 1
waveOnFSMk = beamLine.fsmFar.prepare_wave(beamLine.source, x, z)
for r in range(wrepeats):
rw.diffract(beamSource, waveOnFSMk)
if wrepeats > 1:
print('wave repeats: {0} of {1} done'.format(
r+1, wrepeats))
what = 'beamFSMk{0}'.format(ip)
outDict[what] = waveOnFSMk
return outDict
rr.run_process = run_process
def define_plots(beamLine):
plots = []
beamLine.fsmXZmeshes = []
for ip, p in enumerate(ps):
lim = beamLine.source.w(p, E0) * maxFactor * 1e3
plot = xrtp.XYCPlot(
'beamFSMg{0}'.format(ip), (1,),
xaxis=xrtp.XYCAxis(r'$x$', u'µm', bins=bins, ppb=ppb),
yaxis=xrtp.XYCAxis(r'$z$', u'µm', bins=bins, ppb=ppb),
caxis=xrtp.XYCAxis('Es phase', '', data=raycing.get_Es_phase,
bins=bins, ppb=ppb))
plot.xaxis.limits = [-lim, lim]
plot.yaxis.limits = [-lim, lim]
plot.title = '{0}-{1:02d}-beamFSMg-at{2:03.1f}m'.format(
prefix, ip, p*1e-3)
tpf = '{0:2.1f} m' if p < 1000 else '{0:2.0f} m'
plot.textPanel = plot.ax2dHist.text(
0.02, 0.98, tpf.format(p*1e-3), size=14, color='w',
transform=plot.ax2dHist.transAxes,
ha='left', va='top')
plot.saveName = plot.title + '.png'
plots.append(plot)
if p > 100 and wantKirchhoff:
plot = xrtp.XYCPlot(
'beamFSMk{0}'.format(ip), (1,),
xaxis=xrtp.XYCAxis(r'$x$', u'µm', bins=bins, ppb=ppb),
yaxis=xrtp.XYCAxis(r'$z$', u'µm', bins=bins, ppb=ppb),
caxis=xrtp.XYCAxis('Es phase', '', data=raycing.get_Es_phase,
bins=bins, ppb=ppb))
plot.xaxis.limits = [-lim, lim]
plot.yaxis.limits = [-lim, lim]
plot.textPanel = plot.ax2dHist.text(
0.02, 0.98, tpf.format(p*1e-3), size=14, color='w',
transform=plot.ax2dHist.transAxes,
ha='left', va='top')
plot.title = '{0}-{1:02d}-beamFSMk-at{2:03.1f}m'.format(
prefix, ip, p*1e-3)
plot.saveName = plot.title + '.png'
plots.append(plot)
ax = plot.xaxis
edges = np.linspace(ax.limits[0], ax.limits[1], ax.bins+1)
xCenters = (edges[:-1] + edges[1:]) * 0.5 / ax.factor
ax = plot.yaxis
edges = np.linspace(ax.limits[0], ax.limits[1], ax.bins+1)
zCenters = (edges[:-1] + edges[1:]) * 0.5 / ax.factor
beamLine.fsmXZmeshes.append([xCenters, zCenters])
for plot in plots:
plot.caxis.limits = [-np.pi, np.pi]
plot.caxis.fwhmFormatStr = None
plot.ax1dHistE.set_yticks([l*np.pi for l in (-1, -0.5, 0, 0.5, 1)])
plot.ax1dHistE.set_yticklabels(
(r'$-\pi$', r'-$\frac{\pi}{2}$', 0, r'$\frac{\pi}{2}$', r'$\pi$'))
return plots
def main():
beamLine = build_beamline()
plots = define_plots(beamLine)
xrtr.run_ray_tracing(
plots, repeats=1, updateEvery=1, beamLine=beamLine, processes=1)
def plot_w():
import matplotlib.pyplot as plt
beamLine = build_beamline()
E = beamLine.source.energies[0]
y = np.linspace(-50, 50, 201) * 1e3
fig = plt.figure(figsize=(7, 6))
ax = fig.add_subplot(111)
ax.set_xlabel(u'$y$ (m)')
ax.set_ylabel(u'$w$ (µm)')
# ax.set_xlim([0, 1])
# ax.set_ylim([energy[0], energy[-1]])
w0s = w0 if raycing.is_sequence(w0) else [w0]
for w0i, color in zip(w0s, ['C0', 'C1']):
w = beamLine.source.w(y, E=E, w0=w0i)
yR = beamLine.source.rayleigh_range(E, w0=w0i)
ax.plot(y*1e-3, w*1e3, color=color,
label='$w_0$={0:.0f} µm'.format(w0i*1e3))
ax.plot(y*1e-3, -w*1e3, color=color)
ax.axvline(yR*1e-3, linestyle='--', color=color)
ax.axvline(-yR*1e-3, linestyle='--', color=color)
ax.legend()
fig.savefig('gaussian_beam_profile.png')
plt.show()
if __name__ == '__main__':
# plot_w()
main()
|
kklmn/xrt
|
tests/raycing/laguerre_hermite_gaussian_beam.py
|
Python
|
mit
| 17,167
|
[
"Gaussian"
] |
1c6a0b3059f5002a5ff999a7e79c2386a2d27f01e9087777f74b0a399cfe501b
|
import numpy as np
bin_size = 10
num_average_together = 1
num_neurons = 1000
start_num = 0
num_trials = 50
folder = 'data/attractor'
filename = 'attractor'
# read in trajectories' neural data
min_length = 1e20
for dimension in range(8):
trajectories = []
for jj in range(start_num, num_trials):
name = ('%s/%s_%i-dimensional_trial%.3i.dat.npz' %
(folder, filename, dimension, jj))
print('reading in %s...' % name)
spikes = np.load(name)['array1']
spikes = spikes.reshape(-1, num_neurons)
if spikes.shape[0] < min_length:
min_length = spikes.shape[0]
print('clipping all data to length %i' % min_length)
trajectories.append(spikes)
num_averaged_trials = int(len(trajectories) / num_average_together)
num_timesteps = min_length
num_bins = int(num_timesteps / bin_size)
    # clip every trial to the common minimum length so the array is rectangular
    neuron_data = np.array([t[:num_timesteps] for t in trajectories])
print('binning spike data')
# convert the spike trains to firing rates, binned in 10ms windows
firing_rates = np.zeros((len(trajectories), num_bins, num_neurons))
for ii in range(len(trajectories)):
for jj in range(num_neurons):
for kk in range(num_bins):
firing_rates[ii, kk, jj] = np.sum(
neuron_data[ii][kk*bin_size:(kk+1)*bin_size, jj])
print('averaging %i trials together' % num_average_together)
fr_avgs = np.zeros((num_averaged_trials, num_bins, num_neurons))
for ii in range(num_averaged_trials):
fr_avg = np.zeros((num_bins, num_neurons))
for jj in range(num_average_together):
fr_avg += firing_rates[ii*num_average_together + jj]
fr_avg /= num_average_together
fr_avgs[ii] = fr_avg.copy()
print('applying 20**2ms Gaussian filter')
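    # Gaussian kernel over the normalized window [-1, 1] with variance 0.02;
    # normalizing it to unit area means the smoothing approximately preserves
    # the summed firing rate (up to edge effects of the 'same' convolution)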
gauss = np.exp(-np.linspace(-1, 1, num_bins)**2 / (2*.02))
gauss /= np.sum(gauss)
fr_dict = {}
for ii in range(num_averaged_trials):
fr_smoothed = np.zeros((num_bins, num_neurons))
for jj in range(num_neurons):
fr_smoothed[:, jj] = np.convolve(fr_avgs[ii, :, jj],
gauss, mode='same')
fr_dict.update({'trial%i' % ii: fr_smoothed})
print('writing full set to matlab file')
import scipy.io
scipy.io.savemat(
'data/attractor/processed_data/attractor_%i_firing_rates' %
dimension, fr_dict)
|
studywolf/REACH-paper
|
analysis/09b-gen_matlab_files_for_jpca_analysis.py
|
Python
|
gpl-3.0
| 2,427
|
[
"Gaussian"
] |
7424a2cc180721ec3549531b3e5800635f6c9fc4d39721f7c4e730a854ed212b
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
return cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
return cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir,
batch_size=FLAGS.batch_size)
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# softmax, i.e. softmax(WX + b)
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name +' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
TakayukiSakai/tensorflow
|
tensorflow/models/image/cifar10/cifar10.py
|
Python
|
apache-2.0
| 13,577
|
[
"Gaussian"
] |
0edb3ce242c0e93f1ee064e17d14484c3db284e2b4c4893a64840b870fe0eb12
|
#!/usr/bin/env python
"""
:mod:`disco <discocli>` -- Disco command line utility
=====================================================
:program:`disco` is a fully-Python startup/configuration script which supports several exciting features.
The new startup script makes it even easier to get up and running with a Disco cluster.
.. note::
This is the manpage for the :program:`disco` command.
Please see :ref:`setup` for more information on installing Disco.
.. hint::
The documentation assumes that the executable ``$DISCO_HOME/bin/disco`` is on your system path.
If it is not on your path, you can add it::
ln -s $DISCO_HOME/bin/disco /usr/bin
If ``/usr/bin`` is not in your ``$PATH``, use an appropriate replacement.
Doing so allows you to simply call :program:`disco`, instead of specifying the complete path.
Run :command:`disco help` for information on using the command line utility.
.. seealso::
The :mod:`ddfs <ddfscli>` command.
See :mod:`disco.settings` for information about Disco settings.
.. _jobhistory:
Job History
-----------
For commands which take a jobname, or which support :option:`-j`,
the special arguments ``@`` and ``@?<string>``
are replaced by the most recent job name and
the most recent job with name matching ``<string>``, respectively.
For example::
disco results @
Would get the results for the most recent job, and::
disco results @?WordCount
Would get the results for the last job with name containing ``WordCount``.
"""
import fileinput, os, sys
if '.disco-home' in os.listdir('.'):
sys.path.append('lib')
from disco.cli import OptionParser, Program
class Disco(Program):
pass
@Disco.command
def debug(program, host=''):
"""Usage: [host]
Connect to master Erlang process via remote shell.
Host is only necessary when master is running on a remote machine.
"""
from subprocess import Popen
master = program.master
nodename = '{0}@{1}'.format(master.name, host) if host else master.nodename
args = program.settings['DISCO_ERLANG'].split() + \
['-remsh', nodename,
'-sname', '{0}_remsh'.format(os.getpid())]
if Popen(args).wait():
raise Exception("Could not connect to {0} ({1})".format(host, nodename))
print("closing remote shell to {0} ({1})".format(host, nodename))
@Disco.command
def nodaemon(program):
"""
Start the master in the current process.
The Erlang shell is opened and log messages are printed to stdout.
Note: quitting the shell will stop the master.
"""
for message in program.master.nodaemon():
print(message)
@Disco.command
def restart(program):
"""
Restart the master.
"""
for message in program.master.restart():
print(message)
@Disco.command
def start(program):
"""
Start the master.
"""
for message in program.master.start():
print(message)
@Disco.command
def status(program):
"""
Display running state of the master. This command should be run on the master.
"""
for message in program.master.status():
print(message)
@Disco.command
def stop(program):
"""
Stop the master.
"""
for message in program.master.stop():
print(message)
@Disco.command
def test(program, *tests):
"""Usage: [testname ...]
Run the specified tests or the entire test suite if none are specified.
Assumes Disco master is already running and configured.
    Test names form an optional list of names of modules in the ``$DISCO_HOME/tests`` directory (e.g. ``test_simple``).
Test names may also include the names of specific test cases (e.g. ``test_sort.MemorySortTestCase``).
"""
from disco.test import TestRunner
if not tests:
tests = list(program.tests)
sys.path.insert(0, program.tests_path)
TestRunner(program.settings).run(*tests)
@Disco.command
def config(program):
"""
Print the disco master configuration.
"""
for config in program.disco.config:
print("\t".join(config))
@Disco.add_job_mode
@Disco.command
def deref(program, *urls):
"""Usage: [url ...]
Dereference the urls and print them to stdout.
Input urls are specified as arguments or read from stdin.
"""
from disco.util import deref
for input in deref(program.input(*urls), resolve=program.options.resolve):
print("\t".join(input))
deref.add_option('-r', '--resolve',
action='store_true',
help='resolve disco internal urls')
@Disco.job_command
def events(program, jobname):
"""Usage: jobname
Print the events for the named job.
"""
print(program.disco.rawevents(jobname, offset=int(program.options.offset)))
events.add_option('-o', '--offset',
default=0,
help='offset to use in requests to disco master')
@Disco.command
def job(program, worker, *inputs):
"""Usage: worker [input ...]
Create a jobpack and submit it to the master.
Worker is automatically added to the jobhome.
Input urls are specified as arguments or read from stdin.
"""
from disco.fileutils import DiscoZipFile
from disco.job import JobPack
def jobzip(*paths):
jobzip = DiscoZipFile()
for path in paths:
jobzip.writepath(path)
jobzip.close()
return jobzip
def jobdata(data):
if data.startswith('@'):
return open(data[1:]).read()
return data
def prefix(p):
return p or os.path.basename(worker).split(".")[0]
jobdict = {'input': program.input(*inputs),
'worker': worker,
'map?': program.options.has_map,
'reduce?': program.options.has_reduce,
'nr_reduces': program.options.nr_reduces,
'prefix': prefix(program.options.prefix),
'scheduler': program.scheduler,
'owner': program.options.owner or program.settings['DISCO_JOB_OWNER']}
jobenvs = dict(program.options.env)
jobzip = jobzip(worker, *program.options.files)
jobdata = jobdata(program.options.data)
jobpack = JobPack(jobdict, jobenvs, jobzip.dumps(), jobdata)
if program.options.verbose:
print("jobdict:")
print("\n".join("\t{0[0]}\t{0[1]}".format(item) for item in jobdict.items()))
print("jobenvs:")
print("\n".join("\t{0[0]}\t{0[1]}".format(item) for item in jobenvs.items()))
print("jobzip:")
print("\n".join("\t{0}".format(name) for name in jobzip.namelist()))
print("jobdata:")
print("\n".join("\t{0}".format(line) for line in jobdata.splitlines()))
if program.options.dump_jobpack:
print(jobpack.dumps())
else:
print(program.disco.submit(jobpack.dumps()))
job.add_option('-m', '--has-map',
action='store_true',
help='sets the map phase flag of the jobdict')
job.add_option('-r', '--has-reduce',
action='store_true',
help='sets the reduce phase flag of the jobdict')
job.add_option('-n', '--nr-reduces',
default=1,
type='int',
help='number of reduces in the reduce phase')
job.add_option('-o', '--owner',
help='owner of the job')
job.add_option('-p', '--prefix',
default=None,
help='prefix to use when naming the job')
job.add_option('-S', '--scheduler',
action='setitem2',
default={},
nargs=2,
help='add a param to the scheduler field of the jobdict')
job.add_option('-e', '--env',
action='append',
default=[],
nargs=2,
help='add a variable to jobenvs')
job.add_option('-f', '--file',
action='append',
default=[],
dest='files',
help='path to add to the jobhome (recursively adds directories)')
job.add_option('-d', '--data',
default='',
help='additional binary jobdata, read from a file if it starts with "@"')
job.add_option('-D', '--dump-jobpack',
action='store_true',
help='dump the jobpack without submitting it to the master')
@Disco.job_command
def jobdict(program, jobname):
"""Usage: jobname
Print the jobdict for the named job.
"""
print(jobname)
for key, value in program.disco.jobpack(jobname).jobdict.items():
print("\t{0}\t{1}".format(key, value))
@Disco.command
def jobs(program):
"""
Print a list of disco jobs and optionally their statuses.
"""
for offset, status, job in program.disco.joblist():
print("{0}\t{1}".format(job, status) if program.options.status else job)
jobs.add_option('-S', '--status',
action='store_true',
help='show job status when printing jobs')
@Disco.job_command
def kill(program, *jobnames):
"""Usage: jobname ...
Kill the named jobs.
"""
for jobname in jobnames:
program.disco.kill(jobname)
@Disco.job_command
def stageresults(program, jobname):
"""Usage: jobname -S stage
Print the list of results from a stage of a job.
This is useful for resuming a job which has failed during
following stages.
"""
from disco.util import iterify
stagename = program.options.stage
for result in program.disco.stageresults(jobname, stagename):
print('\t'.join('{0}'.format(e) for e in iterify(result)).rstrip())
stageresults.add_option('-S', '--stage',
default='map',
help='target stage.')
@Disco.command
def nodeinfo(program):
"""Usage:
Print the node information.
"""
for item in program.disco.nodeinfo().items():
print('{0[0]}\t{0[1]}'.format(item))
@Disco.job_command
def oob(program, jobname):
"""Usage: jobname
Print the oob keys for the named job.
"""
from disco.job import Job
for key in Job(name=jobname, master=program.disco).oob_list():
print(key)
@oob.subcommand
def get(program, key, jobname):
"""Usage: key jobname
Print the oob value for the given key and jobname.
"""
from disco.job import Job
print(Job(name=program.job_history(jobname), master=program.disco).oob_get(key))
@Disco.job_command
def pstats(program, jobname):
"""Usage: jobname
Print the profiling statistics for the named job.
Assumes the job was run with profile flag enabled.
"""
sort_stats = program.options.sort_stats or ['cumulative']
program.disco.profile_stats(jobname).sort_stats(*sort_stats).print_stats()
pstats.add_option('-k', '--sort-stats',
action='append',
default=[],
help='keys to use for sorting profiling statistics')
@Disco.job_command
def purge(program, *jobnames):
"""Usage: jobname ...
Purge the named jobs.
"""
for jobname in jobnames:
program.disco.purge(jobname)
@Disco.job_command
def results(program, jobname):
"""Usage: jobname
Print the list of results for a completed job.
"""
from disco.util import iterify
status, results = program.disco.results(jobname)
for result in results:
print('\t'.join('{0}'.format(e) for e in iterify(result)).rstrip())
@Disco.command
def run(program, jobclass, *inputs):
"""Usage: jobclass [input ...]
Create an instance of jobclass and run it.
Input urls are specified as arguments or read from stdin.
"""
from disco.util import reify
sys.path.insert(0, '')
job = reify(jobclass)(name=program.options.name,
master=program.disco,
settings=program.settings)
input = program.input(*inputs)
if any(input):
program.options.jobargs['input'] = input
if program.options.scheduler:
program.options.jobargs['scheduler'] = program.scheduler
job.run(**program.options.jobargs)
print(job.name)
run.add_option('-n', '--name',
help='prefix to use for submitting a job')
run.add_option('-m', '--map',
action='setitem',
dest='jobargs',
type='reify',
help='the worker map parameter')
run.add_option('-r', '--reduce',
action='setitem',
dest='jobargs',
type='reify',
help='the worker reduce parameter')
run.add_option('--save',
action='setitem',
dest='jobargs',
type='reify',
help='save results to DDFS?')
run.add_option('--profile',
action='setitem',
dest='jobargs',
type='reify',
help='enable job profiling?')
run.add_option('--partitions',
action='setitem',
dest='jobargs',
type='reify',
help='number of partitions to create, if any')
run.add_option('-S', '--scheduler',
action='setitem2',
nargs=2,
help='add a param to the scheduler field of the jobdict')
run.add_option('-P', '--param',
action='setitem2',
dest='jobargs',
default={},
nargs=2,
help='add a job parameter')
@Disco.command
def submit(program, *file):
"""Usage: [file]
Submit a jobpack to the master.
Reads the jobpack from file or stdin.
"""
print(program.disco.submit(''.join(fileinput.input(file))))
@Disco.command
def client_version(program):
"""
Print the version of the Python Disco client library.
    This assumes that the library has an installed egg file.
"""
from disco.core import client_version
print(client_version())
@Disco.command
def master_version(program):
"""
Print the version of the Disco master.
    This assumes that the master is running.
"""
print(program.disco.master_version())
@Disco.job_command
def wait(program, jobname):
"""Usage: jobname
Wait for the named job to complete and print the list of results.
"""
from disco.util import iterify
for result in program.disco.wait(jobname):
print('\t'.join('{0}'.format(e,) for e in iterify(result)).rstrip())
if __name__ == '__main__':
Disco(option_parser=OptionParser()).main()
# Workaround for "disco test" in Python2.5 which doesn't shutdown the
# test_server thread properly.
sys.exit(0) # XXX still needed?
|
discoproject/disco
|
bin/discocli.py
|
Python
|
bsd-3-clause
| 14,534
|
[
"exciting"
] |
eb8d780be76d39301dfec5302892722b0071cdbffd92e2fa800fa40baf92d7ce
|
"""Attempt #2 at organizing neuron models
- We specify types of neurons using subclasses of Neuron
- This includes things like LIF vs HH and also Float vs Fixed, Rate vs Spiking
- We build a NeuronPool object which actually has code for running neurons
- We keep a list of known Neuron types around so if we're asked for just
  a Rate neuron, we can pick the first one on the list that matches
- Configuration of parameters is done via descriptors
"""
import numpy as np
import weakref
"""
Neuron type specifications
"""
class FloatParameter(object):
def __init__(self, default, min=None, max=None):
self.default = float(default)
self.min = min
self.max = max
self.data = weakref.WeakKeyDictionary()
def __get__(self, instance, owner):
return self.data.get(instance, self.default)
def __set__(self, instance, value):
if self.min is not None and value < self.min:
raise AttributeError('parameter value must be >=%g' % self.min)
if self.max is not None and value > self.max:
raise AttributeError('parameter value must be <=%g' % self.max)
self.data[instance] = float(value)
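# Illustration (hypothetical usage, relying on the classes defined below):
# descriptor-backed parameters are validated on assignment, e.g.
#   lif = LIF(tau_rc=0.05)    # fine
#   lif.tau_ref = -0.001      # raises AttributeError: value must be >= 0
#   lif.bogus = 1             # raises AttributeError: unknown parameter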
class Neuron(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __setattr__(self, key, value):
if key not in dir(self):
raise AttributeError('Unknown parameter "%s"' % key)
super(Neuron, self).__setattr__(key, value)
class LIF(Neuron):
tau_rc = FloatParameter(0.02, min=0)
tau_ref = FloatParameter(0.002, min=0)
class Rate(Neuron):
pass
class Spiking(Neuron):
pass
class Fixed(Neuron):
pass
class Izhikevich(Neuron):
a = FloatParameter(0.02)
b = FloatParameter(0.2)
c = FloatParameter(-65)
d = FloatParameter(8)
"""
Base class for neuron pools
Pass in a list of neuron_types to set parameters
"""
class NeuronPool:
def __init__(self, n_neurons, neuron_types=None):
if neuron_types is None:
            neuron_types = self.neuron_type  # subclasses define the class attribute 'neuron_type'
for n in neuron_types:
for key in dir(n):
if not key.startswith('_'):
setattr(self, key, getattr(n, key))
self.make(n_neurons)
def make(self, n_neurons):
raise NotImplementedError('NeuronPools must provide "make"')
def step(self, dt, J):
raise NotImplementedError('NeuronPools must provide "step"')
"""
Various neuron models
"""
class LIFRatePool(NeuronPool):
neuron_type = [LIF, Rate]
def make(self, n_neurons):
pass
def step(self, dt, J):
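        # steady-state LIF rate: r = 1 / (tau_ref + tau_rc*ln(1 + 1/(J - 1)))
        # for J > 1; below threshold (J <= 1) the neuron stays silent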
old = np.seterr(divide='ignore', invalid='ignore')
try:
r = 1.0 / (self.tau_ref + self.tau_rc * np.log1p(1.0 / (J-1)))
r[J <= 1] = 0
finally:
np.seterr(**old)
return r * dt # multiply by dt to do rate per timestep
class LIFSpikingPool(NeuronPool):
neuron_type = [LIF, Spiking]
def make(self, n_neurons):
self.voltage = np.zeros(n_neurons)
self.refractory_time = np.zeros(n_neurons)
def step(self, dt, J):
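        # forward-Euler membrane update: the voltage decays toward J with time
        # constant tau_rc; refractory neurons have their voltage zeroed (or
        # partially scaled at the refractory boundary), and the refractory
        # period of spiking neurons is extended by the interpolated spike time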
dv = (dt / self.tau_rc) * (J - self.voltage)
self.voltage += dv
self.voltage[self.voltage < 0] = 0
self.refractory_time -= dt
self.voltage *= (1-self.refractory_time / dt).clip(0, 1)
spiked = self.voltage > 1
overshoot = (self.voltage[spiked > 0] - 1) / dv[spiked > 0]
spiketime = dt * (1 - overshoot)
self.voltage[spiked > 0] = 0
self.refractory_time[spiked > 0] = self.tau_ref + spiketime
return spiked
class LIFFixedPool(NeuronPool):
neuron_type = [LIF, Spiking, Fixed]
def make(self, n_neurons):
        self.voltage = np.zeros(n_neurons, dtype='int32')  # 'i32' is not a valid numpy dtype string
        self.refractory_time = np.zeros(n_neurons, dtype='uint64')  # was 'u8', which numpy reads as an 8-byte uint
self.dt = None
self.lfsr = 1
def step(self, dt, J):
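        # all state is kept in 16.16 fixed point: 0x10000 represents 1.0, so
        # dv is computed with integer shifts; self.lfsr is a 16-bit Galois
        # LFSR (taps 0xB400) used as a cheap pseudo-random source for
        # stochastically shortening the refractory period by one step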
if self.dt != dt:
self.dt = dt
self.dt_over_tau_rc = int(dt * 0x10000 / self.tau_rc)
self.ref_steps = int(self.tau_ref / dt)
        J = np.asarray(J * 0x10000, dtype='int32')
dv = ((J - self.voltage) * self.dt_over_tau_rc) >> 16
dv[self.refractory_time > 0] = 0
self.refractory_time[self.refractory_time > 0] -= 1
self.voltage += dv
self.voltage[self.voltage < 0] = 0
spiked = self.voltage > 0x10000
self.refractory_time[spiked > 0] = self.ref_steps
# randomly adjust the refractory period to account for overshoot
for i in np.where(spiked > 0)[0]:
p = ((self.voltage[i] - 0x10000) << 16) / dv[i]
if self.lfsr < p:
self.refractory_time[i] -= 1
self.lfsr = (self.lfsr >> 1) ^ (-(self.lfsr & 0x1) & 0xB400)
self.voltage[spiked > 0] = 0
return spiked
class IzhikevichPool(NeuronPool):
neuron_type = [Izhikevich, Spiking]
def make(self, n_neurons):
self.v = np.zeros(n_neurons) + self.c
self.u = self.b * self.v
def step(self, dt, J):
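        # standard Izhikevich (2003) model: v' = 0.04*v**2 + 5*v + 140 - u + J
        # and u' = a*(b*v - u); the published equations use ms, so the
        # derivatives are scaled by 1000 for dt given in seconds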
dv = (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + J) * 1000
du = (self.a * (self.b * self.v - self.u)) * 1000
self.v += dv * dt
self.u += du * dt
spiked = self.v >= 30
self.v[spiked > 0] = self.c
self.u[spiked > 0] = self.u[spiked > 0] + self.d
return spiked
"""
List of known neuron models, in order of preference
"""
neuron_models = [
LIFSpikingPool,
LIFRatePool,
LIFFixedPool,
IzhikevichPool,
]
"""
Create a pool of neurons, given the required type specifications
"""
import inspect
def create(n_neurons, neuron_type):
# make sure it's a list
try:
len(neuron_type)
except TypeError:
neuron_type = [neuron_type]
# make sure elements in the list are instances, not classes
for i, type in enumerate(neuron_type):
if inspect.isclass(type):
neuron_type[i] = type()
# look through the list of neuron models to see if we can
# find a match
for model in neuron_models:
for type in neuron_type:
if type.__class__ not in model.neuron_type:
break
else:
return model(n_neurons, neuron_type)
raise Exception('Could not find suitable neuron model')
if __name__ == '__main__':
spiking = create(100, [LIF, Spiking])
rate = create(100, [LIF, Rate])
fixed = create(100, [LIF, Fixed])
iz = create(100, [Izhikevich])
#iz = create(100, [Izhikevich(a=0.02, b=0.2, c=-50, d=2)])
J = np.linspace(-2, 10, 100)
dt = 0.001
T = 1
spiking_data = []
rate_data = []
iz_data = []
fixed_data = []
v = []
for i in range(int(T/dt)):
spiking_data.append(spiking.step(dt, J))
rate_data.append(rate.step(dt, J))
iz_data.append(iz.step(dt, J))
fixed_data.append(fixed.step(dt, J))
v.append(fixed.voltage[-1])
rate_tuning = np.sum(rate_data, axis=0)/T
spiking_tuning = np.sum(spiking_data, axis=0)/T
iz_tuning = np.sum(iz_data, axis=0)/T
fixed_tuning = np.sum(fixed_data, axis=0)/T
import pylab
pylab.subplot(2, 1, 1)
pylab.plot(J, rate_tuning)
pylab.plot(J, spiking_tuning)
pylab.plot(J, iz_tuning)
pylab.plot(J, fixed_tuning, linewidth=4)
pylab.subplot(2, 1, 2)
pylab.plot(v)
#pylab.plot(np.array(fixed_data)[:,-1])
pylab.show()
|
ctn-waterloo/neuron_models
|
v2-parameters.py
|
Python
|
mit
| 7,491
|
[
"NEURON"
] |
0d3dd1909259e204a197618a34eba70cf5f6927f319c474b9533174cdc992f16
|
from __future__ import with_statement
from distutils.core import setup
import numpy
def get_version():
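    # Parse __version__ out of science/__init__.py without importing the
    # package (importing could fail before dependencies are installed).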
d={}
version_line=''
with open('science/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
version_line=line
print(version_line)
exec(version_line,d)
return d['__version__']
setup(
name = 'science',
version=get_version(),
description="Utilities for Science",
author="Brian Blais",
packages=['science'],
package_data = {'science': ['science.mplstyle']},
)
|
bblais/Python-for-Science
|
setup.py
|
Python
|
mit
| 579
|
[
"Brian"
] |
5ce1a5f7a5716021f9df6a737a0067df8c7c7bfe080e69b4dd246bef844dc4c8
|
"""Support for control of ElkM1 lighting (X10, UPB, etc)."""
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from . import DOMAIN as ELK_DOMAIN, ElkEntity, create_elk_entities
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Elk light platform."""
if discovery_info is None:
return
elk = hass.data[ELK_DOMAIN]['elk']
async_add_entities(
create_elk_entities(hass, elk.lights, 'plc', ElkLight, []), True)
class ElkLight(ElkEntity, Light):
"""Representation of an Elk lighting device."""
def __init__(self, element, elk, elk_data):
"""Initialize the Elk light."""
super().__init__(element, elk, elk_data)
self._brightness = self._element.status
@property
def brightness(self):
"""Get the brightness."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def is_on(self) -> bool:
"""Get the current brightness."""
return self._brightness != 0
def _element_changed(self, element, changeset):
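        # the Elk panel reports light levels on a 0-100 scale (a bare status
        # of 1 is treated as fully on); rescale to Home Assistant's 0-255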
status = self._element.status if self._element.status != 1 else 100
self._brightness = round(status * 2.55)
async def async_turn_on(self, **kwargs):
"""Turn on the light."""
self._element.level(round(kwargs.get(ATTR_BRIGHTNESS, 255) / 2.55))
async def async_turn_off(self, **kwargs):
"""Turn off the light."""
self._element.level(0)
|
MartinHjelmare/home-assistant
|
homeassistant/components/elkm1/light.py
|
Python
|
apache-2.0
| 1,614
|
[
"Elk"
] |
3c22c77487e1c6e7301f1fcdd6f1a14f138084114a98c2c2e93c15b9ceaa5134
|
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 8 Nov 2012
@author: George
'''
'''
Models a FIFO queue where entities can wait in order to get into a server
'''
import simpy
from CoreObject import CoreObject
# ===========================================================================
# the Queue object
# ===========================================================================
class Queue(CoreObject):
family='Buffer'
#===========================================================================
# the __init__ method of the Queue
#===========================================================================
def __init__(self, id='', name='', capacity=1, isDummy=False, schedulingRule="FIFO",
level=None, gatherWipStat=False, **kw):
self.type="Queue" # String that shows the type of object
CoreObject.__init__(self, id, name)
capacity=float(capacity)
if capacity<0 or capacity==float("inf"):
self.capacity=float("inf")
else:
self.capacity=int(capacity)
self.isDummy=bool(int(isDummy)) #Boolean that shows if it is the dummy first Queue
self.schedulingRule=schedulingRule #the scheduling rule that the Queue follows
self.multipleCriterionList=[] #list with the criteria used to sort the Entities in the Queue
SRlist = [schedulingRule]
if schedulingRule.startswith("MC"): # if the first criterion is MC aka multiple criteria
SRlist = schedulingRule.split("-") # split the string of the criteria (delimiter -)
            self.schedulingRule=SRlist.pop(0)           # pop the leading "MC" marker, which flags multi-criteria sorting
self.multipleCriterionList=SRlist # hold the criteria list in the property multipleCriterionList
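            # e.g. "MC-Priority-EDD" -> schedulingRule becomes "MC" and
            # multipleCriterionList becomes ["Priority", "EDD"]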
for scheduling_rule in SRlist:
if scheduling_rule not in self.getSupportedSchedulingRules():
raise ValueError("Unknown scheduling rule %s for %s" %
(scheduling_rule, id))
self.gatherWipStat=gatherWipStat
# trigger level for the reallocation of operators
if level:
assert level<=self.capacity, "the level cannot be bigger than the capacity of the queue"
self.level=level
from Globals import G
G.QueueList.append(self)
@staticmethod
def getSupportedSchedulingRules():
return ("FIFO", "Priority", "EDD", "EOD",
"NumStages", "RPC", "LPT", "SPT", "MS", "WINQ")
#===========================================================================
# the initialize method of the Queue class
#===========================================================================
def initialize(self):
# using the Process __init__ and not the CoreObject __init__
CoreObject.initialize(self)
# initialise the internal Queue (type Resource) of the Queue object
self.Res=simpy.Resource(self.env, self.capacity)
# event used by router
self.loadOperatorAvailable=self.env.event()
self.expectedSignals['isRequested']=1
self.expectedSignals['canDispose']=1
self.expectedSignals['loadOperatorAvailable']=1
#===========================================================================
# run method of the queue
#===========================================================================
def run(self):
activeObjectQueue=self.Res.users
# check if there is WIP and signal receiver
self.initialSignalReceiver()
while 1:
self.printTrace(self.id, waitEvent='')
# wait until the Queue can accept an entity and one predecessor requests it
self.expectedSignals['canDispose']=1
self.expectedSignals['isRequested']=1
self.expectedSignals['loadOperatorAvailable']=1
receivedEvent=yield self.env.any_of([self.isRequested, self.canDispose, self.loadOperatorAvailable])
self.printTrace(self.id, received='')
# if the event that activated the thread is isRequested then getEntity
if self.isRequested in receivedEvent:
transmitter, eventTime=self.isRequested.value
self.printTrace(self.id, isRequested=transmitter.id)
# reset the isRequested signal parameter
self.isRequested=self.env.event()
self.getEntity()
#if entity just got to the dummyQ set its startTime as the current time
if self.isDummy:
activeObjectQueue[0].startTime=self.env.now
            # if the queue received a loadOperatorAvailable signal (from the Router) with a signalparam time
if self.loadOperatorAvailable in receivedEvent:
transmitter, eventTime=self.loadOperatorAvailable.value
self.loadOperatorAvailable=self.env.event()
            # if the queue received a canDispose signal with a signalparam time, the signal was sent from a MouldAssemblyBuffer
if self.canDispose in receivedEvent:
transmitter, eventTime=self.canDispose.value
self.printTrace(self.id, canDispose='')
self.canDispose=self.env.event()
# if the event that activated the thread is canDispose then signalReceiver
if self.haveToDispose():
if self.receiver:
if not self.receiver.entryIsAssignedTo():
# try to signal receiver. In case of failure signal giver (for synchronization issues)
if not self.signalReceiver():
self.signalGiver()
continue
self.signalReceiver()
# signal the giver (for synchronization issues)
self.signalGiver()
# =======================================================================
# checks if the Queue can accept an entity
# it checks also who called it and returns TRUE
# only to the predecessor that will give the entity.
# =======================================================================
def canAccept(self, callerObject=None):
activeObjectQueue=self.Res.users
#if we have only one predecessor just check if there is a place available
# this is done to achieve better (cpu) processing time
# then we can also use it as a filter for a yield method
if(callerObject==None):
return len(activeObjectQueue)<self.capacity
thecaller=callerObject
return len(activeObjectQueue)<self.capacity and (self.isInRouteOf(thecaller))
# =======================================================================
# checks if the Queue can dispose an entity to the following object
# it checks also who called it and returns TRUE
    # only to the receiver that will take the entity.
    # NOTE: this check is potentially slow and should be revisited
# =======================================================================
def haveToDispose(self, callerObject=None):
activeObjectQueue=self.Res.users
#if we have only one possible receiver just check if the Queue holds one or more entities
if(callerObject==None):
return len(activeObjectQueue)>0
thecaller=callerObject
return len(activeObjectQueue)>0 and thecaller.isInRouteOf(self)
# =======================================================================
# removes an entity from the Object
# =======================================================================
def removeEntity(self, entity=None):
activeEntity=CoreObject.removeEntity(self, entity) #run the default method
if self.canAccept():
self.signalGiver()
# TODO: disable that for the mouldAssemblyBuffer
if not self.__class__.__name__=='MouldAssemblyBufferManaged':
if self.haveToDispose():
# self.printTrace(self.id, attemptSignalReceiver='(removeEntity)')
self.signalReceiver()
        # reset the expected signals for the Queue; ideally this would happen
        # at the start of the loop
        # XXX: consider doing this in all CoreObjects
self.expectedSignals['isRequested']=1
self.expectedSignals['canDispose']=1
self.expectedSignals['loadOperatorAvailable']=1
# check if the queue is empty, if yes then try to signal the router, operators may need reallocation
try:
if self.level:
if not len(self.getActiveObjectQueue()) and self.checkForDedicatedOperators():
self.requestAllocation()
        except AttributeError:
            # self.level exists only when a trigger level was given at creation
            pass
return activeEntity
# =======================================================================
# checks if the Queue can accept an entity and
# there is an entity in some predecessor waiting for it
# also updates the predecessorIndex to the one that is to be taken
# =======================================================================
def canAcceptAndIsRequested(self,callerObject=None):
activeObjectQueue=self.Res.users
giverObject=callerObject
assert giverObject, 'there must be a caller for canAcceptAndIsRequested'
return len(activeObjectQueue)<self.capacity and giverObject.haveToDispose(self)
# =======================================================================
# gets an entity from the predecessor that
# the predecessor index points to
# =======================================================================
def getEntity(self):
activeEntity=CoreObject.getEntity(self) #run the default behavior
# if the level is reached then try to signal the Router to reallocate the operators
try:
if self.level:
if len(self.getActiveObjectQueue())==self.level and self.checkForDedicatedOperators():
self.requestAllocation()
        except AttributeError:
            # self.level exists only when a trigger level was given at creation
            pass
return activeEntity
#===========================================================================
# checks whether the entity can proceed to a successor object
#===========================================================================
def canDeliver(self, entity=None):
assert self.isInActiveQueue(entity), entity.id +' not in the internalQueue of'+ self.id
activeEntity=entity
mayProceed=False
# for all the possible receivers of an entity check whether they can accept and then set accordingly the canProceed flag of the entity
for nextObject in [object for object in self.next if object.canAcceptEntity(activeEntity)]:
activeEntity.proceed=True
activeEntity.candidateReceivers.append(nextObject)
mayProceed=True
return mayProceed
# =======================================================================
# sorts the Entities of the Queue according to the scheduling rule
# =======================================================================
def sortEntities(self):
#if we have sorting according to multiple criteria we have to call the sorter many times
if self.schedulingRule=="MC":
for criterion in reversed(self.multipleCriterionList):
self.activeQSorter(criterion=criterion)
#else we just use the default scheduling rule
else:
self.activeQSorter()
# =======================================================================
# sorts the Entities of the Queue according to the scheduling rule
# =======================================================================
def activeQSorter(self, criterion=None):
activeObjectQ=self.Res.users
if criterion==None:
criterion=self.schedulingRule
#if the schedulingRule is first in first out
if criterion=="FIFO":
pass
#if the schedulingRule is based on a pre-defined priority
elif criterion=="Priority":
activeObjectQ.sort(key=lambda x: x.priority)
#if the schedulingRule is earliest due date
elif criterion=="EDD":
activeObjectQ.sort(key=lambda x: x.dueDate)
#if the schedulingRule is earliest order date
elif criterion=="EOD":
activeObjectQ.sort(key=lambda x: x.orderDate)
#if the schedulingRule is to sort Entities according to the stations they have to visit
elif criterion=="NumStages":
activeObjectQ.sort(key=lambda x: len(x.remainingRoute), reverse=True)
#if the schedulingRule is to sort Entities according to the their remaining processing time in the system
elif criterion=="RPC":
for entity in activeObjectQ:
RPT=0
for step in entity.remainingRoute:
processingTime=step.get('processingTime',None)
if processingTime:
RPT+=float(processingTime.get('Fixed',{}).get('mean',0))
entity.totalRemainingProcessingTime=RPT
activeObjectQ.sort(key=lambda x: x.totalRemainingProcessingTime, reverse=True)
#if the schedulingRule is to sort Entities according to longest processing time first in the next station
elif criterion=="LPT":
for entity in activeObjectQ:
processingTime = entity.remainingRoute[0].get('processingTime',None)
if processingTime:
entity.processingTimeInNextStation=float(processingTime.get('Fixed',{}).get('mean',0))
else:
entity.processingTimeInNextStation=0
activeObjectQ.sort(key=lambda x: x.processingTimeInNextStation, reverse=True)
#if the schedulingRule is to sort Entities according to shortest processing time first in the next station
elif criterion=="SPT":
for entity in activeObjectQ:
processingTime = entity.remainingRoute[0].get('processingTime',None)
if processingTime:
entity.processingTimeInNextStation=float(processingTime.get('Fixed',{}).get('mean',0))
else:
entity.processingTimeInNextStation=0
activeObjectQ.sort(key=lambda x: x.processingTimeInNextStation)
#if the schedulingRule is to sort Entities based on the minimum slackness
elif criterion=="MS":
for entity in activeObjectQ:
RPT=0
for step in entity.remainingRoute:
processingTime=step.get('processingTime',None)
if processingTime:
RPT+=float(processingTime.get('Fixed',{}).get('mean',0))
entity.totalRemainingProcessingTime=RPT
activeObjectQ.sort(key=lambda x: (x.dueDate-x.totalRemainingProcessingTime))
#if the schedulingRule is to sort Entities based on the length of the following Queue
elif criterion=="WINQ":
from Globals import G
for entity in activeObjectQ:
                if len(entity.remainingRoute)>1:
                    nextObjIds=entity.remainingRoute[1].get('stationIdsList',[])
                    #guard against a route entry that references no known station
                    nextObject=None
                    for obj in G.ObjList:
                        if obj.id in nextObjIds:
                            nextObject=obj
                            break
                    entity.nextQueueLength=len(nextObject.Res.users) if nextObject else 0
else:
entity.nextQueueLength=0
activeObjectQ.sort(key=lambda x: x.nextQueueLength)
else:
assert False, "Unknown scheduling criterion %r" % (criterion, )
def outputResultsJSON(self):
from Globals import G
json = {'_class': 'Dream.%s' % self.__class__.__name__,
'id': str(self.id),
'family': self.family,
'results': {} }
if self.gatherWipStat:
json['results']['wip_stat_list']=self.WipStat
G.outputJSON['elementList'].append(json)
|
nexedi/dream
|
dream/simulation/Queue.py
|
Python
|
gpl-3.0
| 17,273
|
[
"VisIt"
] |
0695e60f5c707184f71326125805bc9cd1c308a39d8917d56e73000524dcb890
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from glob import glob
from horton import ProAtomDB, context
__all__ = [
'get_proatomdb_cp2k', 'get_proatomdb_hf_sto3g',
'get_proatomdb_hf_lan', 'check_names', 'check_proatom_splines',
]
def get_proatomdb_cp2k():
'''Return a proatomdb of pseudo oxygens and one silicon for testing purposes'''
fns = glob(context.get_fn('test/atom_*.cp2k.out'))
return ProAtomDB.from_files(fns)
def get_proatomdb_hf_sto3g():
'''Return a proatomdb of H and O at hf/sto-3g for testing purposes'''
fns = glob(context.get_fn('test/atom_???_???_hf_sto3g.fchk'))
return ProAtomDB.from_files(fns)
def get_proatomdb_hf_lan():
'''Return a proatomdb of H, O, Si at hf/LANL2MB for testing purposes'''
fns = glob(context.get_fn('test/atom_???_???_hf_lan.fchk'))
return ProAtomDB.from_files(fns)
def check_names(names, part):
for name in names:
assert name in part.cache
def check_proatom_splines(part):
for index in range(part.natom):
spline = part.get_proatom_spline(index)
grid = part.get_grid(index)
array1 = grid.zeros()
part.eval_spline(index, spline, array1, grid)
array2 = grid.zeros()
part.eval_proatom(index, array2, grid)
assert abs(array1).max() != 0.0
assert abs(array1 - array2).max() < 1e-5
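# Illustrative usage sketch (hypothetical partitioning object, not part of the
# original helpers): a test would typically build a partitioning scheme on one
# of the proatom databases above and then run both checks:
#
#   padb = get_proatomdb_hf_sto3g()
#   part = ...  # some partitioning-scheme instance constructed with padb
#   check_names(['charges'], part)
#   check_proatom_splines(part)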
|
theochem/horton
|
horton/part/test/common.py
|
Python
|
gpl-3.0
| 2,120
|
[
"CP2K"
] |
fda7c312c7587afa6990b4664759abbda214732105dcc7f352c330f0c4cd82a3
|
#!/usr/bin/env python3
from fontTools.ttLib import TTFont
import sys
import json
# map of characters to extract
metrics_to_extract = {
# Font name
"AMS-Regular": {
u"\u21e2": None, # \dashrightarrow
u"\u21e0": None, # \dashleftarrow
},
"Main-Regular": {
# Skew and italic metrics can't be easily parsed from the TTF. Instead,
# we map each character to a "base character", which is a character
# from the same font with correct italic and skew metrics. A character
# maps to None if it doesn't have a base.
#u"\u2209": None, # \notin
#u"\u2260": None, # \neq
u"\u2245": None, # \cong
u"\u2026": None, # \ldots
u"\u22ef": None, # \cdots
u"\u22f1": None, # \ddots
u"\u22ee": None, # \vdots
u"\u22ee": None, # \vdots
u"\u22a8": None, # \models
u"\u22c8": None, # \bowtie
u"\u2250": None, # \doteq
u"\u23b0": None, # \lmoustache
u"\u23b1": None, # \rmoustache
u"\u27ee": None, # \lgroup
u"\u27ef": None, # \rgroup
u"\u27f5": None, # \longleftarrow
u"\u27f8": None, # \Longleftarrow
u"\u27f6": None, # \longrightarrow
u"\u27f9": None, # \Longrightarrow
u"\u27f7": None, # \longleftrightarrow
u"\u27fa": None, # \Longleftrightarrow
u"\u21a6": None, # \mapsto
u"\u27fc": None, # \longmapsto
u"\u21a9": None, # \hookleftarrow
u"\u21aa": None, # \hookrightarrow
u"\u21cc": None, # \rightleftharpoons
},
"Main-Bold": {
u"\u2245": None, # \cong
},
"Size1-Regular": {
u"\u222c": u"\u222b", # \iint, based on \int
u"\u222d": u"\u222b", # \iiint, based on \int
},
"Size2-Regular": {
u"\u222c": u"\u222b", # \iint, based on \int
u"\u222d": u"\u222b", # \iiint, based on \int
},
}
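# Example drawn from the table above: in Size1-Regular, u"\u222c" (\iint) maps
# to the base character u"\u222b" (\int), so \iint inherits the italic, skew
# and width metrics of \int, while its height and depth are still measured
# from its own glyph.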
def main():
start_json = json.load(sys.stdin)
for font in start_json:
fontInfo = TTFont("../../fonts/KaTeX_" + font + ".ttf")
glyf = fontInfo["glyf"]
widths = fontInfo.getGlyphSet()
unitsPerEm = float(fontInfo["head"].unitsPerEm)
# We keep ALL Unicode cmaps, not just fontInfo["cmap"].getcmap(3, 1).
# This is playing it extra safe, since it reports inconsistencies.
# Platform 0 is Unicode, platform 3 is Windows. For platform 3,
# encoding 1 is UCS-2 and encoding 10 is UCS-4.
cmap = [t.cmap for t in fontInfo["cmap"].tables
if (t.platformID == 0)
or (t.platformID == 3 and t.platEncID in (1, 10))]
chars = metrics_to_extract.get(font, {})
chars[u"\u0020"] = None # space
chars[u"\u00a0"] = None # nbsp
for char, base_char in chars.items():
code = ord(char)
names = set(t.get(code) for t in cmap)
if not names:
sys.stderr.write(
"Codepoint {} of font {} maps to no name\n"
.format(code, font))
continue
if len(names) != 1:
sys.stderr.write(
"Codepoint {} of font {} maps to multiple names: {}\n"
.format(code, font, ", ".join(sorted(names))))
continue
name = names.pop()
height = depth = italic = skew = width = 0
glyph = glyf[name]
if glyph.numberOfContours:
height = glyph.yMax / unitsPerEm
depth = -glyph.yMin / unitsPerEm
width = widths[name].width / unitsPerEm
if base_char:
base_char_str = str(ord(base_char))
base_metrics = start_json[font][base_char_str]
italic = base_metrics["italic"]
skew = base_metrics["skew"]
width = base_metrics["width"]
start_json[font][str(code)] = {
"height": height,
"depth": depth,
"italic": italic,
"skew": skew,
"width": width
}
sys.stdout.write(
json.dumps(start_json, separators=(',', ':'), sort_keys=True))
if __name__ == "__main__":
main()
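# Example invocation (sketch; the file names are hypothetical): the script
# reads existing metrics JSON on stdin, augments it with the characters listed
# in metrics_to_extract, and writes the merged JSON to stdout:
#
#   python3 extract_ttfs.py < fontMetricsData.json > fontMetricsData.new.json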
|
Khan/KaTeX
|
src/metrics/extract_ttfs.py
|
Python
|
mit
| 4,278
|
[
"Bowtie"
] |
89ac56c19c6be1f814c907cedaa101246193bd11011e32877e79bc45623214b0
|
"""
Find modules used by a script, using bytecode analysis.
Based on the stdlib modulefinder by Thomas Heller and Just van Rossum,
but uses a graph data structure and 2.3 features
XXX: Verify all calls to _import_hook (and variants) to ensure that
imports are done in the right way.
"""
from __future__ import absolute_import, print_function
#FIXME: To decrease the likelihood of ModuleGraph exceeding the recursion limit
#and hence unpredictably raising fatal exceptions, increase the recursion
#limit at PyInstaller startup (i.e., in the
#PyInstaller.building.build_main.build() function). For details, see:
# https://github.com/pyinstaller/pyinstaller/issues/1919#issuecomment-216016176
import pkg_resources
import ast
import codecs
import dis
import imp
import marshal
import os
import pkgutil
import sys
import re
from collections import deque, namedtuple
from struct import unpack
import warnings
from altgraph.ObjectGraph import ObjectGraph
from altgraph import GraphError
from . import util
from . import zipio
from ._compat import get_instructions, BytesIO, StringIO, \
pathname2url, _cOrd, _READ_MODE
BOM = codecs.BOM_UTF8.decode('utf-8')
#FIXME: Leverage this rather than magic numbers below.
ABSOLUTE_OR_RELATIVE_IMPORT_LEVEL = -1
"""
Constant instructing the builtin `__import__()` function to attempt both
absolute and relative imports.
"""
#FIXME: Leverage this rather than magic numbers below.
ABSOLUTE_IMPORT_LEVEL = 0
"""
Constant instructing the builtin `__import__()` function to attempt only
absolute imports.
"""
#FIXME: Leverage this rather than magic numbers below.
DEFAULT_IMPORT_LEVEL = (
ABSOLUTE_OR_RELATIVE_IMPORT_LEVEL if sys.version_info[0] == 2 else
ABSOLUTE_IMPORT_LEVEL)
"""
Constant instructing the builtin `__import__()` function to attempt the default
import style specific to the active Python interpreter.
Specifically, under:
* Python 2, this defaults to attempting both absolute and relative imports.
* Python 3, this defaults to attempting only absolute imports.
"""
# TODO: Refactor all uses of explicit filetypes in this module *AND* of the
# imp.get_suffixes() function to use this dictionary instead. Unfortunately,
# tests for explicit filetypes (e.g., ".py") are non-portable. Under Windows,
# for example, both the ".py" *AND* ".pyw" filetypes signify valid uncompiled
# Python modules.
# TODO: The imp.get_suffixes() function (in fact, the entire "imp" package) has
# been deprecated as of Python 3.3 by the importlib.machinery.all_suffixes()
# function, which largely performs the same role. Unfortunately, the latter
# function was only introduced with Python 3.3. Since PyInstaller requires
# Python >= 3.3 when running under Python 3, refactor this as follows:
#
# * Under Python 2, continue calling imp.get_suffixes().
# * Under Python 3, call importlib.machinery.all_suffixes() instead.
_IMPORTABLE_FILETYPE_TO_METADATA = {
filetype: (filetype, open_mode, imp_type)
for filetype, open_mode, imp_type in imp.get_suffixes()
}
"""
Dictionary mapping the filetypes of importable files to the 3-tuple of metadata
describing such files returned by the `imp.get_suffixes()` function whose first
element is that filetype.
This dictionary simplifies platform-portable importation of importable files,
including:
* Uncompiled modules suffixed by `.py` (as well as `.pyw` under Windows).
* Compiled modules suffixed by either `.pyc` or `.pyo`.
* C extensions suffixed by the platform-specific shared library filetype (e.g.,
`.so` under Linux, `.dll` under Windows).
The keys of this dictionary are `.`-prefixed filetypes (e.g., `.py`, `.so`);
the values of this dictionary are 3-tuples whose:
1. First element is the same `.`-prefixed filetype.
2. Second element is the mode to be passed to the `open()` built-in to open
   files of that filetype under the current platform and Python interpreter
   (e.g., `rU` for the `.py` filetype under Python 2, `r` for the same
   filetype under Python 3).
3. Third element is a magic number specific to the `imp` module (e.g.,
   `imp.C_EXTENSION` for filetypes corresponding to C extensions).
"""
# Modulegraph does a good job at simulating Python's, but it can not
# handle packagepath modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
#
# Note that this is a mapping to lists of paths.
_packagePathMap = {}
# Prefix used in magic .pth files used by setuptools to create namespace
# packages without an __init__.py file.
#
# The value is a list of such prefixes as the prefix varies with versions of
# setuptools.
_SETUPTOOLS_NAMESPACEPKG_PTHs=(
# setuptools 31.0.0
("import sys, types, os;has_mfs = sys.version_info > (3, 5);"
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('"),
# distribute 0.6.10
("import sys,types,os; p = os.path.join("
"sys._getframe(1).f_locals['sitedir'], *('"),
# setuptools 0.6c9, distribute 0.6.12
("import sys,new,os; p = os.path.join(sys._getframe("
"1).f_locals['sitedir'], *('"),
# setuptools 28.1.0
("import sys, types, os;p = os.path.join("
"sys._getframe(1).f_locals['sitedir'], *('"),
# setuptools 28.7.0
("import sys, types, os;pep420 = sys.version_info > (3, 3);"
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('"),
)
class InvalidRelativeImportError (ImportError):
pass
def _namespace_package_path(fqname, pathnames, path=None):
"""
Return the __path__ for the python package in *fqname*.
This function uses setuptools metadata to extract information
about namespace packages from installed eggs.
"""
working_set = pkg_resources.WorkingSet(path)
path = list(pathnames)
for dist in working_set:
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata(
'namespace_packages.txt').splitlines()
if fqname in namespaces:
nspath = os.path.join(dist.location, *fqname.split('.'))
if nspath not in path:
path.append(nspath)
return path
_strs = re.compile(r'''^\s*["']([A-Za-z0-9_]+)["'],?\s*''') # "<- emacs happy
def _eval_str_tuple(value):
"""
Input is the repr of a tuple of strings, output
is that tuple.
This only works with a tuple where the members are
python identifiers.
"""
if not (value.startswith('(') and value.endswith(')')):
raise ValueError(value)
orig_value = value
value = value[1:-1]
result = []
while value:
m = _strs.match(value)
if m is None:
raise ValueError(orig_value)
result.append(m.group(1))
value = value[len(m.group(0)):]
return tuple(result)
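# Example (sketch): _eval_str_tuple("('foo', 'bar')") == ('foo', 'bar'), while
# non-identifier members such as "('a-b',)" raise ValueError.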
def _path_from_importerror(exc, default):
# This is a hack, but sadly enough the necessary information
# isn't available otherwise.
m = re.match(r'^No module named (\S+)$', str(exc))
if m is not None:
return m.group(1)
return default
def os_listdir(path):
"""
Deprecated name
"""
warnings.warn(
"Use zipio.listdir instead of os_listdir",
DeprecationWarning)
return zipio.listdir(path)
def _code_to_file(co):
""" Convert code object to a .pyc pseudo-file """
if sys.version_info >= (3, 7):
header = imp.get_magic() + (b'\0' * 12)
elif sys.version_info >= (3, 4):
header = imp.get_magic() + (b'\0' * 8)
else:
header = imp.get_magic() + (b'\0' * 4)
return BytesIO(header + marshal.dumps(co))
def moduleInfoForPath(path):
for (ext, readmode, typ) in imp.get_suffixes():
if path.endswith(ext):
return os.path.basename(path)[:-len(ext)], readmode, typ
return None
def AddPackagePath(packagename, path):
warnings.warn(
"Use addPackagePath instead of AddPackagePath",
DeprecationWarning)
addPackagePath(packagename, path)
def addPackagePath(packagename, path):
paths = _packagePathMap.get(packagename, [])
paths.append(path)
_packagePathMap[packagename] = paths
_replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleGraph.
def ReplacePackage(oldname, newname):
warnings.warn("use replacePackage instead of ReplacePackage",
DeprecationWarning)
replacePackage(oldname, newname)
def replacePackage(oldname, newname):
_replacePackageMap[oldname] = newname
#FIXME: What is this? Do we actually need this? This appears to provide
#significantly more fine-grained metadata than PyInstaller will ever require.
#It consumes a great deal of space (slots or no slots), since we store an
#instance of this class for each edge of the graph.
class DependencyInfo (namedtuple("DependencyInfo",
["conditional", "function", "tryexcept", "fromlist"])):
__slots__ = ()
def _merged(self, other):
if (not self.conditional and not self.function and not self.tryexcept) \
or (not other.conditional and not other.function and not other.tryexcept):
return DependencyInfo(
conditional=False,
function=False,
tryexcept=False,
fromlist=self.fromlist and other.fromlist)
else:
return DependencyInfo(
conditional=self.conditional or other.conditional,
function=self.function or other.function,
tryexcept=self.tryexcept or other.tryexcept,
fromlist=self.fromlist and other.fromlist)
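# Illustrative merge semantics (sketch, derived from _merged() above): an
# unconditional top-level import absorbs a conditional one, e.g.
#
#   a = DependencyInfo(conditional=True, function=False, tryexcept=False, fromlist=True)
#   b = DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=False)
#   a._merged(b)  # -> DependencyInfo(False, False, False, False)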
#FIXME: Shift the following Node class hierarchy into a new
#"PyInstaller.lib.modulegraph.node" module. This module is much too long.
#FIXME: Refactor "_deferred_imports" from a tuple into a proper lightweight
#class leveraging "__slots__". If not for backward compatibility, we'd just
#leverage a named tuple -- but this should do just as well.
#FIXME: Move the "packagepath" attribute into the "Package" class. Only
#packages define the "__path__" special attribute. The codebase currently
#erroneously tests whether "module.packagepath is not None" to determine
#whether a node is a package or not. However, "isinstance(module, Package)" is
#a significantly more reliable test. Refactor the former into the latter.
class Node(object):
"""
Abstract base class (ABC) of all objects added to a `ModuleGraph`.
Attributes
----------
code : codeobject
Code object of the pure-Python module corresponding to this graph node
if any _or_ `None` otherwise.
graphident : str
Synonym of `identifier` required by the `ObjectGraph` superclass of the
`ModuleGraph` class. For readability, the `identifier` attribute should
typically be used instead.
filename : str
Absolute path of this graph node's corresponding module, package, or C
extension if any _or_ `None` otherwise.
identifier : str
Fully-qualified name of this graph node's corresponding module,
package, or C extension.
packagepath : str
List of the absolute paths of all directories comprising this graph
node's corresponding package. If this is a:
* Non-namespace package, this list contains exactly one path.
* Namespace package, this list contains one or more paths.
_deferred_imports : list
List of all target modules imported by the source module corresponding
        to this graph node whose importations have been deferred for subsequent
processing in between calls to the `_ModuleGraph._scan_code()` and
`_ModuleGraph._process_imports()` methods for this source module _or_
`None` otherwise. Each element of this list is a 3-tuple
`(have_star, _safe_import_hook_args, _safe_import_hook_kwargs)`
collecting the importation of a target module from this source module
for subsequent processing, where:
* `have_star` is a boolean `True` only if this is a `from`-style star
import (e.g., resembling `from {target_module_name} import *`).
* `_safe_import_hook_args` is a (typically non-empty) sequence of all
positional arguments to be passed to the `_safe_import_hook()` method
to add this importation to the graph.
* `_safe_import_hook_kwargs` is a (typically empty) dictionary of all
keyword arguments to be passed to the `_safe_import_hook()` method
to add this importation to the graph.
Unlike functional languages, Python imposes a maximum depth on the
interpreter stack (and hence recursion). On breaching this depth,
Python raises a fatal `RuntimeError` exception. Since `ModuleGraph`
parses imports recursively rather than iteratively, this depth _was_
commonly breached before the introduction of this list. Python
environments installing a large number of modules (e.g., Anaconda) were
particularly susceptible. Why? Because `ModuleGraph` concurrently
descended through both the abstract syntax trees (ASTs) of all source
modules being parsed _and_ the graph of all target modules imported by
these source modules being built. The stack thus consisted of
alternating layers of AST and graph traversal. To unwind such
alternation and effectively halve the stack depth, `ModuleGraph` now
descends through the abstract syntax tree (AST) of each source module
being parsed and adds all importations originating within this module
to this list _before_ descending into the graph of these importations.
See pyinstaller/pyinstaller/#1289 for further details.
_global_attr_names : set
Set of the unqualified names of all global attributes (e.g., classes,
variables) defined in the pure-Python module corresponding to this
graph node if any _or_ the empty set otherwise. This includes the names
of all attributes imported via `from`-style star imports from other
existing modules (e.g., `from {target_module_name} import *`). This
set is principally used to differentiate the non-ignorable importation
of non-existent submodules in a package from the ignorable importation
of existing global attributes defined in that package's pure-Python
`__init__` submodule in `from`-style imports (e.g., `bar` in
`from foo import bar`, which may be either a submodule or attribute of
`foo`), as such imports ambiguously allow both. This set is _not_ used
to differentiate submodules from attributes in `import`-style imports
(e.g., `bar` in `import foo.bar`, which _must_ be a submodule of
`foo`), as such imports unambiguously allow only submodules.
_starimported_ignored_module_names : set
Set of the fully-qualified names of all existing unparsable modules
that the existing parsable module corresponding to this graph node
attempted to perform one or more "star imports" from. If this module
either does _not_ exist or does but is unparsable, this is the empty
set. Equivalently, this set contains each fully-qualified name
`{trg_module_name}` for which:
* This module contains an import statement of the form
`from {trg_module_name} import *`.
* The module whose name is `{trg_module_name}` exists but is _not_
parsable by `ModuleGraph` (e.g., due to _not_ being pure-Python).
**This set is currently defined but otherwise ignored.**
_submodule_basename_to_node : dict
Dictionary mapping from the unqualified name of each submodule
contained by the parent module corresponding to this graph node to that
submodule's graph node. If this dictionary is non-empty, this parent
module is typically but _not_ always a package (e.g., the non-package
`os` module containing the `os.path` submodule).
"""
__slots__ = [
'code',
'filename',
'graphident',
'identifier',
'packagepath',
'_deferred_imports',
'_global_attr_names',
'_starimported_ignored_module_names',
'_submodule_basename_to_node',
]
def __init__(self, identifier):
"""
Initialize this graph node.
Parameters
----------
identifier : str
Fully-qualified name of this graph node's corresponding module,
package, or C extension.
"""
self.code = None
self.filename = None
self.graphident = identifier
self.identifier = identifier
self.packagepath = None
self._deferred_imports = None
self._global_attr_names = set()
self._starimported_ignored_module_names = set()
self._submodule_basename_to_node = dict()
def is_global_attr(self, attr_name):
"""
`True` only if the pure-Python module corresponding to this graph node
defines a global attribute (e.g., class, variable) with the passed
name.
If this module is actually a package, this method instead returns
`True` only if this package's pure-Python `__init__` submodule defines
such a global attribute. In this case, note that this package may still
contain an importable submodule of the same name. Callers should
attempt to import this attribute as a submodule of this package
_before_ assuming this attribute to be an ignorable global. See
"Examples" below for further details.
Parameters
----------
attr_name : str
Unqualified name of the attribute to be tested.
Returns
----------
bool
`True` only if this module defines this global attribute.
Examples
----------
Consider a hypothetical module `foo` containing submodules `bar` and
`__init__` where the latter assigns `bar` to be a global variable
(possibly star-exported via the special `__all__` global variable):
>>> # In "foo.__init__":
>>> bar = 3.1415
        Python 2 and 3 both permit this. This method returns
`True` in this case (i.e., when called on the `foo` package's graph
node, passed the attribute name `bar`) despite the importability of the
`foo.bar` submodule.
"""
return attr_name in self._global_attr_names
def is_submodule(self, submodule_basename):
"""
`True` only if the parent module corresponding to this graph node
contains the submodule with the passed name.
If `True`, this parent module is typically but _not_ always a package
(e.g., the non-package `os` module containing the `os.path` submodule).
Parameters
----------
submodule_basename : str
Unqualified name of the submodule to be tested.
Returns
----------
bool
`True` only if this parent module contains this submodule.
"""
return submodule_basename in self._submodule_basename_to_node
def add_global_attr(self, attr_name):
"""
Record the global attribute (e.g., class, variable) with the passed
name to be defined by the pure-Python module corresponding to this
graph node.
If this module is actually a package, this method instead records this
attribute to be defined by this package's pure-Python `__init__`
submodule.
Parameters
----------
attr_name : str
Unqualified name of the attribute to be added.
"""
self._global_attr_names.add(attr_name)
def add_global_attrs_from_module(self, target_module):
"""
Record all global attributes (e.g., classes, variables) defined by the
target module corresponding to the passed graph node to also be defined
by the source module corresponding to this graph node.
If the source module is actually a package, this method instead records
these attributes to be defined by this package's pure-Python `__init__`
submodule.
Parameters
----------
target_module : Node
Graph node of the target module to import attributes from.
"""
self._global_attr_names.update(target_module._global_attr_names)
def add_submodule(self, submodule_basename, submodule_node):
"""
Add the submodule with the passed name and previously imported graph
node to the parent module corresponding to this graph node.
This parent module is typically but _not_ always a package (e.g., the
non-package `os` module containing the `os.path` submodule).
Parameters
----------
submodule_basename : str
Unqualified name of the submodule to add to this parent module.
submodule_node : Node
Graph node of this submodule.
"""
self._submodule_basename_to_node[submodule_basename] = submodule_node
def get_submodule(self, submodule_basename):
"""
Graph node of the submodule with the passed name in the parent module
corresponding to this graph node.
If this parent module does _not_ contain this submodule, an exception
is raised. Else, this parent module is typically but _not_ always a
package (e.g., the non-package `os` module containing the `os.path`
submodule).
Parameters
----------
        submodule_basename : str
Unqualified name of the submodule to retrieve.
Returns
----------
Node
Graph node of this submodule.
"""
return self._submodule_basename_to_node[submodule_basename]
def get_submodule_or_none(self, submodule_basename):
"""
Graph node of the submodule with the passed unqualified name in the
parent module corresponding to this graph node if this module contains
this submodule _or_ `None`.
This parent module is typically but _not_ always a package (e.g., the
non-package `os` module containing the `os.path` submodule).
Parameters
----------
submodule_basename : str
Unqualified name of the submodule to retrieve.
Returns
----------
Node
Graph node of this submodule if this parent module contains this
submodule _or_ `None`.
"""
return self._submodule_basename_to_node.get(submodule_basename)
def remove_global_attr_if_found(self, attr_name):
"""
Record the global attribute (e.g., class, variable) with the passed
name if previously recorded as defined by the pure-Python module
corresponding to this graph node to be subsequently undefined by the
same module.
If this module is actually a package, this method instead records this
attribute to be undefined by this package's pure-Python `__init__`
submodule.
This method is intended to be called on globals previously defined by
this module that are subsequently undefined via the `del` built-in by
this module, thus "forgetting" or "undoing" these globals.
For safety, there exists no corresponding `remove_global_attr()`
method. While defining this method is trivial, doing so would invite
`KeyError` exceptions on scanning valid Python that lexically deletes a
global in a scope under this module's top level (e.g., in a function)
_before_ defining this global at this top level. Since `ModuleGraph`
cannot and should not (re)implement a full-blown Python interpreter,
ignoring out-of-order deletions is the only sane policy.
Parameters
----------
attr_name : str
Unqualified name of the attribute to be removed.
"""
if self.is_global_attr(attr_name):
self._global_attr_names.remove(attr_name)
def __cmp__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return cmp(self.graphident, otherIdent) # noqa: F821
def __eq__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return False
return self.graphident == otherIdent
def __ne__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return True
return self.graphident != otherIdent
def __lt__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return self.graphident < otherIdent
def __le__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return self.graphident <= otherIdent
def __gt__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return self.graphident > otherIdent
def __ge__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return self.graphident >= otherIdent
def __hash__(self):
return hash(self.graphident)
def infoTuple(self):
return (self.identifier,)
def __repr__(self):
return '%s%r' % (type(self).__name__, self.infoTuple())
# TODO: This indirection is, frankly, unnecessary. The
# ModuleGraph.alias_module() should directly add the desired AliasNode instance
# to the graph rather than indirectly adding an Alias instance to the
# "lazynodes" dictionary.
class Alias(str):
"""
Placeholder aliasing an existing source module to a non-existent target
module (i.e., the desired alias).
For obscure reasons, this class subclasses `str`. Each instance of this
class is the fully-qualified name of the existing source module being
aliased. Unlike the related `AliasNode` class, instances of this class are
_not_ actual nodes and hence _not_ added to the graph; they only facilitate
communication between the `ModuleGraph.alias_module()` and
`ModuleGraph.findNode()` methods.
"""
class AliasNode(Node):
"""
Graph node representing the aliasing of an existing source module under a
non-existent target module name (i.e., the desired alias).
"""
def __init__(self, name, node):
"""
Initialize this alias.
Parameters
----------
name : str
Fully-qualified name of the non-existent target module to be
created (as an alias of the existing source module).
node : Node
Graph node of the existing source module being aliased.
"""
super(AliasNode, self).__init__(name)
#FIXME: Why only some? Why not *EVERYTHING* except "graphident", which
#must remain equal to "name" for lookup purposes? This is, after all,
#an alias. The idea is for the two nodes to effectively be the same.
# Copy some attributes from this source module into this target alias.
for attr_name in (
'identifier', 'packagepath',
'_global_attr_names', '_starimported_ignored_module_names',
'_submodule_basename_to_node'):
if hasattr(node, attr_name):
setattr(self, attr_name, getattr(node, attr_name))
def infoTuple(self):
return (self.graphident, self.identifier)
class BadModule(Node):
pass
class ExcludedModule(BadModule):
pass
class MissingModule(BadModule):
pass
class InvalidRelativeImport (BadModule):
def __init__(self, relative_path, from_name):
identifier = relative_path
if relative_path.endswith('.'):
identifier += from_name
else:
identifier += '.' + from_name
super(InvalidRelativeImport, self).__init__(identifier)
self.relative_path = relative_path
self.from_name = from_name
def infoTuple(self):
return (self.relative_path, self.from_name)
class Script(Node):
def __init__(self, filename):
super(Script, self).__init__(filename)
self.filename = filename
def infoTuple(self):
return (self.filename,)
class BaseModule(Node):
def __init__(self, name, filename=None, path=None):
super(BaseModule, self).__init__(name)
self.filename = filename
self.packagepath = path
def infoTuple(self):
return tuple(filter(None, (self.identifier, self.filename, self.packagepath)))
class BuiltinModule(BaseModule):
pass
class SourceModule(BaseModule):
pass
class InvalidSourceModule(SourceModule):
pass
class CompiledModule(BaseModule):
pass
class InvalidCompiledModule(BaseModule):
pass
class Extension(BaseModule):
pass
class Package(BaseModule):
"""
Graph node representing a non-namespace package.
"""
pass
class NamespacePackage(Package):
"""
Graph node representing a namespace package.
"""
pass
class RuntimeModule(BaseModule):
"""
Graph node representing a non-package Python module dynamically defined at
runtime.
Most modules are statically defined on-disk as standard Python files.
Some modules, however, are dynamically defined in-memory at runtime
(e.g., `gi.repository.Gst`, dynamically defined by the statically
defined `gi.repository.__init__` module).
This node represents such a runtime module. Since this is _not_ a package,
all attempts to import submodules from this module in `from`-style import
statements (e.g., the `queue` submodule in `from six.moves import queue`)
will be silently ignored.
To ensure that the parent package of this module if any is also imported
and added to the graph, this node is typically added to the graph by
calling the `ModuleGraph.add_module()` method.
"""
pass
class RuntimePackage(Package):
"""
Graph node representing a non-namespace Python package dynamically defined
at runtime.
Most packages are statically defined on-disk as standard subdirectories
containing `__init__.py` files. Some packages, however, are dynamically
defined in-memory at runtime (e.g., `six.moves`, dynamically defined by
the statically defined `six` module).
This node represents such a runtime package. All attributes imported from
this package in `from`-style import statements that are submodules of this
package (e.g., the `queue` submodule in `from six.moves import queue`) will
be imported rather than ignored.
To ensure that the parent package of this package if any is also imported
and added to the graph, this node is typically added to the graph by
calling the `ModuleGraph.add_module()` method.
"""
pass
#FIXME: Safely removable. We don't actually use this anywhere. After removing
#this class, remove the corresponding entry from "compat".
class FlatPackage(BaseModule):
def __init__(self, *args, **kwds):
warnings.warn(
"This class will be removed in a future version of modulegraph",
DeprecationWarning)
        super(FlatPackage, self).__init__(*args, **kwds)
#FIXME: Safely removable. We don't actually use this anywhere. After removing
#this class, remove the corresponding entry from "compat".
class ArchiveModule(BaseModule):
def __init__(self, *args, **kwds):
warnings.warn(
"This class will be removed in a future version of modulegraph",
DeprecationWarning)
        super(ArchiveModule, self).__init__(*args, **kwds)
# HTML templates for ModuleGraph generator
header = """\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>%(TITLE)s</title>
<style>
.node { padding: 0.5em 0 0.5em; border-top: thin grey dotted; }
.moduletype { font: smaller italic }
.node a { text-decoration: none; color: #006699; }
.node a:visited { text-decoration: none; color: #2f0099; }
</style>
</head>
<body>
<h1>%(TITLE)s</h1>"""
entry = """
<div class="node">
<a name="%(NAME)s"></a>
%(CONTENT)s
</div>"""
contpl = """<tt>%(NAME)s</tt> <span class="moduletype">%(TYPE)s</span>"""
contpl_linked = """\
<a target="code" href="%(URL)s" type="text/plain"><tt>%(NAME)s</tt></a>
<span class="moduletype">%(TYPE)s</span>"""
imports = """\
<div class="import">
%(HEAD)s:
%(LINKS)s
</div>
"""
footer = """
</body>
</html>"""
def _ast_names(names):
result = []
for nm in names:
if isinstance(nm, ast.alias):
result.append(nm.name)
else:
result.append(nm)
result = [r for r in result if r != '__main__']
return result
def uniq(seq):
"""Remove duplicates from a list, preserving order"""
# Taken from https://stackoverflow.com/questions/480214
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
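# Example (sketch): uniq(['b', 'a', 'b', 'c']) == ['b', 'a', 'c'] -- order of
# first occurrence is preserved, unlike set().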
if sys.version_info[0] == 2:
DEFAULT_IMPORT_LEVEL = -1
else:
DEFAULT_IMPORT_LEVEL = 0
class _Visitor(ast.NodeVisitor):
def __init__(self, graph, module):
self._graph = graph
self._module = module
self._level = DEFAULT_IMPORT_LEVEL
self._in_if = [False]
self._in_def = [False]
self._in_tryexcept = [False]
@property
def in_if(self):
return self._in_if[-1]
@property
def in_def(self):
return self._in_def[-1]
@property
def in_tryexcept(self):
return self._in_tryexcept[-1]
def _collect_import(self, name, fromlist, level):
if sys.version_info[0] == 2:
if name == '__future__' and 'absolute_import' in (fromlist or ()):
self._level = 0
have_star = False
if fromlist is not None:
fromlist = uniq(fromlist)
if '*' in fromlist:
fromlist.remove('*')
have_star = True
# Record this import as originating from this module for subsequent
# handling by the _process_imports() method.
self._module._deferred_imports.append(
(have_star,
(name, self._module, fromlist, level),
{'edge_attr': DependencyInfo(
conditional=self.in_if,
tryexcept=self.in_tryexcept,
function=self.in_def,
fromlist=False)}))
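    # Illustrative deferred record (sketch): scanning "from foo import bar, *"
    # inside an "if" block appends roughly
    #
    #   (True, ('foo', <this module>, ['bar'], level),
    #    {'edge_attr': DependencyInfo(conditional=True, function=False,
    #                                 tryexcept=False, fromlist=False)})
    #
    # to the module's _deferred_imports list for later processing.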
def visit_Import(self, node):
for nm in _ast_names(node.names):
self._collect_import(nm, None, self._level)
def visit_ImportFrom(self, node):
level = node.level if node.level != 0 else self._level
self._collect_import(node.module or '', _ast_names(node.names), level)
def visit_If(self, node):
self._in_if.append(True)
self.generic_visit(node)
self._in_if.pop()
def visit_FunctionDef(self, node):
self._in_def.append(True)
self.generic_visit(node)
self._in_def.pop()
visit_AsyncFunctionDef = visit_FunctionDef
def visit_Try(self, node):
self._in_tryexcept.append(True)
self.generic_visit(node)
self._in_tryexcept.pop()
def visit_TryExcept(self, node):
self._in_tryexcept.append(True)
self.generic_visit(node)
self._in_tryexcept.pop()
def visit_Expression(self, node):
        # Expression nodes cannot contain import statements or
        # other nodes that are relevant to us.
pass
# Expression isn't actually used as such in AST trees,
# therefore define visitors for all kinds of expression nodes.
visit_BoolOp = visit_Expression
visit_BinOp = visit_Expression
visit_UnaryOp = visit_Expression
visit_Lambda = visit_Expression
visit_IfExp = visit_Expression
visit_Dict = visit_Expression
visit_Set = visit_Expression
    visit_ListComp = visit_Expression
    visit_SetComp = visit_Expression
    visit_GeneratorExp = visit_Expression
    visit_Compare = visit_Expression
    visit_Yield = visit_Expression
    visit_YieldFrom = visit_Expression
    visit_Await = visit_Expression
    visit_Call = visit_Expression
class ModuleGraph(ObjectGraph):
"""
Directed graph whose nodes represent modules and edges represent
dependencies between these modules.
"""
def createNode(self, cls, name, *args, **kw):
m = self.findNode(name)
if m is None:
#assert m is None, m
m = super(ModuleGraph, self).createNode(cls, name, *args, **kw)
return m
def __init__(self, path=None, excludes=(), replace_paths=(), implies=(), graph=None, debug=0):
super(ModuleGraph, self).__init__(graph=graph, debug=debug)
if path is None:
path = sys.path
self.path = path
self.lazynodes = {}
# excludes is stronger than implies
self.lazynodes.update(dict(implies))
for m in excludes:
self.lazynodes[m] = None
self.replace_paths = replace_paths
self.set_setuptools_nspackages()
# Maintain own list of package path mappings in the scope of Modulegraph
# object.
self._package_path_map = _packagePathMap
def set_setuptools_nspackages(self):
# This is used when running in the test-suite
self.nspackages = self._calc_setuptools_nspackages()
def _calc_setuptools_nspackages(self):
# Setuptools has some magic handling for namespace
# packages when using 'install --single-version-externally-managed'
# (used by system packagers and also by pip)
#
        # When this option is used namespace packages are written to
# disk *without* an __init__.py file, which means the regular
# import machinery will not find them.
#
# We therefore explicitly look for the hack used by
# setuptools to get this kind of namespace packages to work.
pkgmap = {}
try:
from pkgutil import ImpImporter
except ImportError:
try:
from _pkgutil import ImpImporter
except ImportError:
ImpImporter = pkg_resources.ImpWrapper
if sys.version_info[:2] >= (3, 3):
import importlib.machinery
ImpImporter = importlib.machinery.FileFinder
for entry in self.path:
importer = pkg_resources.get_importer(entry)
if isinstance(importer, ImpImporter):
try:
ldir = os.listdir(entry)
except os.error:
continue
for fn in ldir:
if fn.endswith('-nspkg.pth'):
with open(os.path.join(entry, fn), _READ_MODE) as fp:
for ln in fp:
for pfx in _SETUPTOOLS_NAMESPACEPKG_PTHs:
if ln.startswith(pfx):
try:
start = len(pfx)-2
stop = ln.index(')', start)+1
except ValueError:
continue
pkg = _eval_str_tuple(ln[start:stop])
identifier = ".".join(pkg)
subdir = os.path.join(entry, *pkg)
if os.path.exists(os.path.join(subdir, '__init__.py')):
# There is a real __init__.py,
# ignore the setuptools hack
continue
if identifier in pkgmap:
pkgmap[identifier].append(subdir)
else:
pkgmap[identifier] = [subdir]
break
return pkgmap
def implyNodeReference(self, node, other, edge_data=None):
"""
Create a reference from the passed source node to the passed other node,
implying the former to depend upon the latter.
While the source node _must_ be an existing graph node, the target node
may be either an existing graph node _or_ a fully-qualified module name.
In the latter case, the module with that name and all parent packages of
that module will be imported _without_ raising exceptions and for each
newly imported module or package:
* A new graph node will be created for that module or package.
* A reference from the passed source node to that module or package will
be created.
This method allows dependencies between Python objects _not_ importable
with standard techniques (e.g., module aliases, C extensions).
Parameters
----------
node : str
Graph node for this reference's source module or package.
other : {Node, str}
Either a graph node _or_ fully-qualified name for this reference's
target module or package.
"""
if isinstance(other, Node):
self._updateReference(node, other, edge_data)
else:
if isinstance(other, tuple):
raise ValueError(other)
others = self._safe_import_hook(other, node, None)
for other in others:
self._updateReference(node, other, edge_data)
def getReferences(self, fromnode):
"""
        Yield all nodes that `fromnode` depends on (that is,
        all modules that `fromnode` imports).
"""
node = self.findNode(fromnode)
out_edges, _ = self.get_edges(node)
return out_edges
def getReferers(self, tonode, collapse_missing_modules=True):
node = self.findNode(tonode)
_, in_edges = self.get_edges(node)
if collapse_missing_modules:
for n in in_edges:
if isinstance(n, MissingModule):
for n in self.getReferers(n, False):
yield n
else:
yield n
else:
for n in in_edges:
yield n
def hasEdge(self, fromnode, tonode):
""" Return True iff there is an edge from 'fromnode' to 'tonode' """
fromnode = self.findNode(fromnode)
tonode = self.findNode(tonode)
return self.graph.edge_by_node(fromnode, tonode) is not None
def foldReferences(self, packagenode):
"""
Create edges to/from `packagenode` based on the edges to/from all
submodules of that package _and_ then hide the graph nodes
corresponding to those submodules.
"""
pkg = self.findNode(packagenode)
for n in self.nodes():
if not n.identifier.startswith(pkg.identifier + '.'):
continue
iter_out, iter_inc = self.get_edges(n)
for other in iter_out:
if other.identifier.startswith(pkg.identifier + '.'):
continue
if not self.hasEdge(pkg, other):
# Ignore circular dependencies
self._updateReference(pkg, other, 'pkg-internal-import')
for other in iter_inc:
if other.identifier.startswith(pkg.identifier + '.'):
# Ignore circular dependencies
continue
if not self.hasEdge(other, pkg):
self._updateReference(other, pkg, 'pkg-import')
self.graph.hide_node(n)
# TODO: unfoldReferences(pkg) that restore the submodule nodes and
# removes 'pkg-import' and 'pkg-internal-import' edges. Care should
# be taken to ensure that references are correct if multiple packages
# are folded and then one of them in unfolded
def _updateReference(self, fromnode, tonode, edge_data):
try:
ed = self.edgeData(fromnode, tonode)
except (KeyError, GraphError): # XXX: Why 'GraphError'
return self.createReference(fromnode, tonode, edge_data)
if not (isinstance(ed, DependencyInfo) and isinstance(edge_data, DependencyInfo)):
self.updateEdgeData(fromnode, tonode, edge_data)
else:
self.updateEdgeData(fromnode, tonode, ed._merged(edge_data))
def createReference(self, fromnode, tonode, edge_data='direct'):
"""
Create a reference from fromnode to tonode
"""
return super(ModuleGraph, self).createReference(fromnode, tonode, edge_data=edge_data)
def findNode(self, name, create_nspkg=True):
"""
Graph node uniquely identified by the passed fully-qualified module
name if this module has been added to the graph _or_ `None` otherwise.
If (in order):
        1. A namespace package with this identifier exists _and_ the passed
           `create_nspkg` parameter is `True`, this package will be
           instantiated and returned.
        2. A lazy node with this identifier and:
           * No dependencies exist, this node will be instantiated and
             returned.
           * Dependencies exist, this node and all transitive dependencies of
             this node will be instantiated and this node returned.
        3. A non-lazy node with this identifier exists, this node will be
           returned as is.
Parameters
----------
name : str
Fully-qualified name of the module whose graph node is to be found.
create_nspkg : bool
Whether or not to implicitly instantiate namespace packages. If
`True` _and_ this name is that of a previously registered namespace
package (i.e., in `self.nspackages`) not already added to the
graph, this package will be added to the graph. Defaults to `True`.
Returns
----------
Node
Graph node of this module if added to the graph _or_ `None`
otherwise.
"""
data = super(ModuleGraph, self).findNode(name)
if data is not None:
return data
if name in self.lazynodes:
deps = self.lazynodes.pop(name)
if deps is None:
# excluded module
m = self.createNode(ExcludedModule, name)
elif isinstance(deps, Alias):
other = self._safe_import_hook(deps, None, None).pop()
m = self.createNode(AliasNode, name, other)
self.implyNodeReference(m, other)
else:
m = self._safe_import_hook(name, None, None).pop()
for dep in deps:
self.implyNodeReference(m, dep)
return m
if name in self.nspackages and create_nspkg:
# name is a --single-version-externally-managed
# namespace package (setuptools/distribute)
pathnames = self.nspackages.pop(name)
m = self.createNode(NamespacePackage, name)
            # FIXME: The filename must be set to a string to ensure that
            # py2app works; it is not yet clear why that is. Setting it to
            # None would be cleaner.
m.filename = '-'
m.packagepath = _namespace_package_path(name, pathnames, self.path)
# As per comment at top of file, simulate runtime packagepath additions.
m.packagepath = m.packagepath + self._package_path_map.get(name, [])
return m
return None
def run_script(self, pathname, caller=None):
"""
Create a node by path (not module name). It is expected to be a Python
source file, and will be scanned for dependencies.
"""
self.msg(2, "run_script", pathname)
pathname = os.path.realpath(pathname)
m = self.findNode(pathname)
if m is not None:
return m
if sys.version_info[0] != 2:
with open(pathname, 'rb') as fp:
encoding = util.guess_encoding(fp)
with open(pathname, _READ_MODE, encoding=encoding) as fp:
contents = fp.read() + '\n'
if contents.startswith(BOM):
# Ignore BOM at start of input
contents = contents[1:]
else:
with open(pathname, _READ_MODE) as fp:
contents = fp.read() + '\n'
co_ast = compile(contents, pathname, 'exec', ast.PyCF_ONLY_AST, True)
co = compile(co_ast, pathname, 'exec', 0, True)
m = self.createNode(Script, pathname)
self._updateReference(caller, m, None)
self._scan_code(m, co, co_ast)
m.code = co
if self.replace_paths:
m.code = self._replace_paths_in_code(m.code)
return m
#FIXME: For safety, the "source_module" parameter should default to the
#root node of the current graph if unpassed. This parameter currently
    #defaults to None, thus disconnecting modules imported in this manner (e.g.,
    #hidden imports imported by depend.analysis.initialize_modgraph()) from the
    #rest of the graph.
def import_hook(
self,
target_module_partname,
source_module=None,
target_attr_names=None,
level=DEFAULT_IMPORT_LEVEL,
edge_attr=None,
):
"""
Import the module with the passed name, all parent packages of this
module, _and_ all submodules and attributes in this module with the
passed names from the previously imported caller module signified by
the passed graph node.
Unlike most import methods (e.g., `_safe_import_hook()`), this method
is designed to be publicly called by both external and internal
callers and hence is public.
Parameters
----------
target_module_partname : str
Partially-qualified name of the target module to be imported. See
`_safe_import_hook()` for further details.
source_module : Node
Graph node for the previously imported **source module** (i.e.,
module containing the `import` statement triggering the call to
this method) _or_ `None` if this module is to be imported in a
"disconnected" manner. **Passing `None` is _not_ recommended.**
Doing so produces a disconnected graph in which the graph node
created for the module to be imported will be disconnected and
hence unreachable from all other nodes -- which frequently causes
subtle issues in external callers (namely PyInstaller, which
silently ignores unreachable nodes).
target_attr_names : list
List of the unqualified names of all submodules and attributes to
be imported from the module to be imported if this is a "from"-
style import (e.g., `[encode_base64, encode_noop]` for the import
`from email.encoders import encode_base64, encode_noop`) _or_
`None` otherwise.
level : int
Whether to perform an absolute or relative import. See
`_safe_import_hook()` for further details.
Returns
----------
list
List of the graph nodes created for all modules explicitly imported
by this call, including the passed module and all submodules listed
in `target_attr_names` _but_ excluding all parent packages
implicitly imported by this call. If `target_attr_names` is `None`
or the empty list, this is guaranteed to be a list of one element:
the graph node created for the passed module.
Raises
----------
ImportError
If the target module to be imported is unimportable.
"""
self.msg(3, "_import_hook", target_module_partname, source_module, source_module, level)
source_package = self._determine_parent(source_module)
target_package, target_module_partname = self._find_head_package(
source_package, target_module_partname, level)
target_module = self._load_tail(target_package, target_module_partname)
target_modules = [target_module]
# If this is a "from"-style import *AND* this target module is
# actually a package, import all submodules of this package specified
# by the "import" half of this import (e.g., the submodules "bar" and
# "car" of the target package "foo" in "from foo import bar, car").
#
# If this target module is a non-package, it could still contain
# importable submodules (e.g., the non-package `os` module containing
# the `os.path` submodule). In this case, these submodules are already
# imported by this target module's pure-Python code. Since our import
# scanner already detects such imports, these submodules need *NOT* be
# reimported here.
if target_attr_names and isinstance(target_module, Package):
for target_submodule in self._import_importable_package_submodules(
target_module, target_attr_names):
if target_submodule not in target_modules:
target_modules.append(target_submodule)
# Add an edge from this source module to each target module.
for target_module in target_modules:
self._updateReference(
source_module, target_module, edge_data=edge_attr)
return target_modules
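    # Illustrative call (sketch; "graph" and the node objects are hypothetical):
    #
    #   nodes = graph.import_hook('email.mime', source_module=script_node,
    #                             target_attr_names=['text', 'image'])
    #
    # would import email, email.mime, email.mime.text and email.mime.image,
    # returning the nodes for email.mime and the two submodules.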
def _determine_parent(self, caller):
"""
Determine the package containing a node.
"""
self.msgin(4, "determine_parent", caller)
parent = None
if caller:
pname = caller.identifier
if isinstance(caller, Package):
parent = caller
elif '.' in pname:
pname = pname[:pname.rfind('.')]
parent = self.findNode(pname)
elif caller.packagepath:
# XXX: I have no idea why this line
# is necessary.
parent = self.findNode(pname)
self.msgout(4, "determine_parent ->", parent)
return parent
def _find_head_package(
self,
source_package,
target_module_partname,
level=DEFAULT_IMPORT_LEVEL):
"""
Import the target package providing the target module with the passed
name to be subsequently imported from the previously imported source
package corresponding to the passed graph node.
Parameters
----------
source_package : Package
Graph node for the previously imported **source package** (i.e.,
package containing the module containing the `import` statement
triggering the call to this method) _or_ `None` if this module is
to be imported in a "disconnected" manner. **Passing `None` is
_not_ recommended.** See the `_import_hook()` method for further
details.
target_module_partname : str
Partially-qualified name of the target module to be imported. See
`_safe_import_hook()` for further details.
level : int
Whether to perform absolute or relative imports. See the
`_safe_import_hook()` method for further details.
Returns
----------
(target_package, target_module_tailname)
2-tuple describing the imported target package, where:
* `target_package` is the graph node created for this package.
* `target_module_tailname` is the unqualified name of the target
module to be subsequently imported (e.g., `text` when passed a
`target_module_partname` of `email.mime.text`).
Raises
----------
ImportError
If the package to be imported is unimportable.
"""
self.msgin(4, "find_head_package", source_package, target_module_partname, level)
#FIXME: Rename all local variable names to something sensible. No,
#"p_fqdn" is not a sensible name.
# If this target module is a submodule...
if '.' in target_module_partname:
target_module_headname, target_module_tailname = (
target_module_partname.split('.', 1))
# Else, this target module is a top-level module.
else:
target_module_headname = target_module_partname
target_module_tailname = ''
# If attempting both absolute and relative imports...
if level == ABSOLUTE_OR_RELATIVE_IMPORT_LEVEL:
if source_package:
target_package_name = source_package.identifier + '.' + target_module_headname
else:
target_package_name = target_module_headname
# Else if attempting only absolute imports...
elif level == ABSOLUTE_IMPORT_LEVEL:
target_package_name = target_module_headname
# Absolute import, ignore the parent
source_package = None
# Else if attempting only relative imports...
else:
if source_package is None:
self.msg(2, "Relative import outside of package")
raise InvalidRelativeImportError(
"Relative import outside of package (name=%r, parent=%r, level=%r)" % (
target_module_partname, source_package, level))
for i in range(level - 1):
if '.' not in source_package.identifier:
self.msg(2, "Relative import outside of package")
raise InvalidRelativeImportError(
"Relative import outside of package (name=%r, parent=%r, level=%r)" % (
target_module_partname, source_package, level))
p_fqdn = source_package.identifier.rsplit('.', 1)[0]
new_parent = self.findNode(p_fqdn)
if new_parent is None:
#FIXME: Repetition detected. Exterminate. Exterminate.
self.msg(2, "Relative import outside of package")
raise InvalidRelativeImportError(
"Relative import outside of package (name=%r, parent=%r, level=%r)" % (
target_module_partname, source_package, level))
assert new_parent is not source_package, (
new_parent, source_package)
source_package = new_parent
if target_module_headname:
target_package_name = (
source_package.identifier + '.' + target_module_headname)
else:
target_package_name = source_package.identifier
# Graph node of this target package.
target_package = self._safe_import_module(
target_module_headname, target_package_name, source_package)
#FIXME: Why exactly is this necessary again? This doesn't quite seem
#right but maybe it is. Shouldn't absolute imports only be performed if
#the passed "level" is either "ABSOLUTE_IMPORT_LEVEL" or
#"ABSOLUTE_OR_RELATIVE_IMPORT_LEVEL" -- or, more succinctly:
#
# if level < 1:
# If this target package is *NOT* importable and a source package was
# passed, attempt to import this target package as an absolute import.
if target_package is None and source_package is not None:
target_package_name = target_module_headname
source_package = None
# Graph node for the target package, again.
target_package = self._safe_import_module(
target_module_headname, target_package_name, source_package)
# If this target package is importable, return this package.
if target_package is not None:
self.msgout(4, "find_head_package ->", (target_package, target_module_tailname))
return target_package, target_module_tailname
# Else, raise an exception.
self.msgout(4, "raise ImportError: No module named", target_package_name)
raise ImportError("No module named " + target_package_name)
def _load_tail(self, package, submodule_name):
"""
Import the submodule with the passed name and all parent packages of
this module from the previously imported parent package corresponding
to the passed graph node.
Parameters
----------
package : Package
Graph node of the previously imported package containing this
submodule.
submodule_name : str
Name of the submodule to be imported in either qualified (e.g.,
`email.mime.base`) or unqualified (e.g., `base`) form.
Returns
----------
Node
Graph node created for this submodule.
Raises
----------
ImportError
If this submodule is unimportable.
"""
self.msgin(4, "load_tail", package, submodule_name)
submodule = package
while submodule_name:
i = submodule_name.find('.')
if i < 0:
i = len(submodule_name)
head, submodule_name = submodule_name[:i], submodule_name[i+1:]
mname = "%s.%s" % (submodule.identifier, head)
submodule = self._safe_import_module(head, mname, submodule)
if submodule is None:
# FIXME: Why do we no longer return a MissingModule instance?
# result = self.createNode(MissingModule, mname)
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError("No module named " + repr(mname))
self.msgout(4, "load_tail ->", submodule)
return submodule
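    # An illustrative sketch (not part of the public API) of how _load_tail()
    # walks a dotted tail, assuming hypothetical "graph" and "email_package"
    # names:
    #
    #     submodule = graph._load_tail(email_package, 'mime.text')
    #
    # This performs, in order, _safe_import_module('mime', 'email.mime',
    # email_package) and then _safe_import_module('text', 'email.mime.text',
    # mime_node), returning the graph node for "email.mime.text" or raising
    # ImportError if any intermediate import fails.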
#FIXME: Refactor from a generator yielding graph nodes into a non-generator
#returning a list or tuple of all yielded graph nodes. This method is only
#called once above and the return value of that call is only iterated over
#as a list or tuple. There's no demonstrable reason for this to be a
#generator. Generators are great for their intended purposes (e.g., as
#continuations). This isn't one of those purposes.
def _import_importable_package_submodules(self, package, attr_names):
"""
Generator importing and yielding each importable submodule (of the
previously imported package corresponding to the passed graph node)
whose unqualified name is in the passed list.
Elements of this list that are _not_ importable submodules of this
package are either:
* Ignorable attributes (e.g., classes, globals) defined at the top
level of this package's `__init__` submodule, which will be ignored.
* Else, unignorable unimportable submodules, in which case an
exception is raised.
Parameters
----------
package : Package
Graph node of the previously imported package containing the
modules to be imported and yielded.
attr_names : list
List of the unqualified names of all attributes of this package to
attempt to import as submodules. This list will be internally
converted into a set, safely ignoring any duplicates in this list
(e.g., reducing the "from"-style import
`from foo import bar, car, far, bar, car, far` to merely
`from foo import bar, car, far`).
Yields
----------
Node
Graph node created for the currently imported submodule.
Raises
----------
ImportError
If any attribute whose name is in `attr_names` is neither:
* An importable submodule of this package.
* An ignorable global attribute (e.g., class, variable) defined at
the top level of this package's `__init__` submodule.
In this case, this attribute _must_ be an unimportable submodule of
this package.
"""
# Ignore duplicate submodule names in the passed list.
attr_names = set(attr_names)
self.msgin(4, "_import_importable_package_submodules", package, attr_names)
#FIXME: This test *SHOULD* be superfluous and hence safely removable.
#The higher-level _scan_bytecode() and _collect_import() methods
#already guarantee "*" characters to be removed from fromlists.
if '*' in attr_names:
attr_names.update(self._find_all_submodules(package))
attr_names.remove('*')
# self.msg(4, '_import_importable_package_submodules (global attrs)', package.identifier, package._global_attr_names)
# For the name of each attribute to be imported from this package...
for attr_name in attr_names:
# self.msg(4, '_import_importable_package_submodules (fromlist attr)', package.identifier, attr_name)
# Graph node of this attribute if this attribute is a previously
# imported module or None otherwise.
submodule = package.get_submodule_or_none(attr_name)
# If this attribute is *NOT* a previously imported module, attempt
# to import this attribute as a submodule of this package.
if submodule is None:
# Fully-qualified name of this submodule.
submodule_name = package.identifier + '.' + attr_name
# Graph node of this submodule if importable or None otherwise.
submodule = self._safe_import_module(
attr_name, submodule_name, package)
# If this submodule is unimportable...
if submodule is None:
# If this attribute is a global (e.g., class, variable)
# defined at the top level of this package's "__init__"
# submodule, this importation is safely ignorable. Do so
# and skip to the next attribute.
#
# This behaviour is non-conformant with Python behaviour,
# which is bad, but is required to sanely handle all
# possible edge cases, which is good. In Python, a global
# attribute defined at the top level of a package's
# "__init__" submodule shadows a submodule of the same name
# in that package. Attempting to import that submodule
# instead imports that attribute; thus, that submodule is
# effectively unimportable. In this method and elsewhere,
# that submodule is tested for first and hence shadows that
# attribute -- the opposite logic. Attempts to import that
# attribute are mistakenly seen as attempts to import that
# submodule! Why?
#
# Edge cases. PyInstaller (and by extension ModuleGraph)
# only cares about module imports. Global attribute imports
# are parsed only as the means to this ends and are
# otherwise ignorable. The cost of erroneously shadowing:
#
# * Submodules by attributes is significant. Doing so
# prevents such submodules from being frozen and hence
# imported at application runtime.
# * Attributes by submodules is insignificant. Doing so
# could erroneously freeze such submodules despite their
# never being imported at application runtime. However,
# ModuleGraph is incapable of determining with certainty
# that Python logic in another module other than the
# "__init__" submodule containing these attributes does
# *NOT* delete these attributes and hence unshadow these
# submodules, which would then become importable at
# runtime and require freezing. Hence, ModuleGraph *MUST*
# permissively assume submodules of the same name as
# attributes to be unshadowed elsewhere and require
# freezing -- even if they do not.
#
# It is practically difficult (albeit technically feasible)
# for ModuleGraph to determine whether or not the target
# attribute names of "from"-style import statements (e.g.,
# "bar" and "car" in "from foo import bar, car") refer to
# non-ignorable submodules or ignorable non-module globals
# during opcode scanning. Distinguishing these two cases
# during opcode scanning would require a costly call to the
# _find_module() method, which would subsequently be
# repeated during import-graph construction. This could be
# ameliorated with caching, which itself would require
# costly space consumption and developer time.
#
# Since opcode scanning fails to distinguish these two
# cases, this and other methods subsequently called at
# import-graph construction time (e.g.,
# _safe_import_hook()) must do so. Since submodules of the
# same name as attributes must assume to be unshadowed
# elsewhere and require freezing, the only solution is to
# attempt to import an attribute as a non-ignorable module
# *BEFORE* assuming an attribute to be an ignorable
# non-module. Which is what this and other methods do.
#
# See Package.is_global_attr() for similar discussion.
if package.is_global_attr(attr_name):
self.msg(4, '_import_importable_package_submodules: ignoring from-imported global', package.identifier, attr_name)
continue
# Else, this attribute is an unimportable submodule. Since
# this is *NOT* safely ignorable, raise an exception.
else:
raise ImportError("No module named " + submodule_name)
# Yield this submodule's graph node to the caller.
yield submodule
self.msgin(4, "_import_importable_package_submodules ->")
def _find_all_submodules(self, m):
if not m.packagepath:
return
# 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
# But we must also collect Python extension modules - although
# we cannot separate normal dlls from Python extensions.
for path in m.packagepath:
try:
names = zipio.listdir(path)
except (os.error, IOError):
self.msg(2, "can't list directory", path)
continue
for info in (moduleInfoForPath(p) for p in names):
if info is None:
continue
if info[0] != '__init__':
yield info[0]
def alias_module(self, src_module_name, trg_module_name):
"""
Alias the source module to the target module with the passed names.
This method ensures that the next call to findNode() given the target
module name will resolve this alias. This includes importing and adding
a graph node for the source module if needed as well as adding a
reference from the target to source module.
Parameters
----------
src_module_name : str
Fully-qualified name of the existing **source module** (i.e., the
module being aliased).
trg_module_name : str
Fully-qualified name of the non-existent **target module** (i.e.,
the alias to be created).
"""
self.msg(3, 'alias_module "%s" -> "%s"' % (src_module_name, trg_module_name))
# print('alias_module "%s" -> "%s"' % (src_module_name, trg_module_name))
assert isinstance(src_module_name, str), '"%s" not a module name.' % str(src_module_name)
assert isinstance(trg_module_name, str), '"%s" not a module name.' % str(trg_module_name)
# If the target module has already been added to the graph as either a
# non-alias or as a different alias, raise an exception.
trg_module = self.findNode(trg_module_name)
if trg_module is not None and not (
isinstance(trg_module, AliasNode) and
trg_module.identifier == src_module_name):
raise ValueError(
'Target module "%s" already imported as "%s".' % (
trg_module_name, trg_module))
# See findNode() for details.
self.lazynodes[trg_module_name] = Alias(src_module_name)
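    # A minimal usage sketch with hypothetical module names, assuming a
    # previously constructed ModuleGraph instance "graph":
    #
    #     graph.alias_module('pkg._impl', 'pkg_impl')
    #
    # Afterwards, findNode('pkg_impl') resolves to an AliasNode referencing
    # the node for "pkg._impl"; the alias is recorded lazily in "lazynodes"
    # and only materialized on first lookup.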
def add_module(self, module):
"""
Add the passed module node to the graph if not already added.
If that module has a parent module or package with a previously added
node, this method also adds a reference from this module node to its
parent node and adds this module node to its parent node's namespace.
This high-level method wraps the low-level `addNode()` method, but is
typically _only_ called by graph hooks adding runtime module nodes. For
all other node types, the `import_module()` method should be called.
Parameters
----------
module : BaseModule
Graph node of the module to be added.
"""
self.msg(3, 'add_module', module)
# If no node exists for this module, add such a node.
module_added = self.findNode(module.identifier)
if module_added is None:
self.addNode(module)
else:
assert module == module_added, 'New module %r != previous %r.' % (module, module_added)
# If this module has a previously added parent, reference this module to
# its parent and add this module to its parent's namespace.
parent_name, _, module_basename = module.identifier.rpartition('.')
if parent_name:
parent = self.findNode(parent_name)
if parent is None:
self.msg(4, 'add_module parent not found:', parent_name)
else:
self.createReference(module, parent)
parent.add_submodule(module_basename, module)
def append_package_path(self, package_name, directory):
"""
        Modulegraph does a good job of simulating Python's import machinery,
        but it cannot handle the '__path__' (i.e., packagepath) modifications
        that packages make at runtime.
        Therefore there is a mechanism whereby you can register extra paths
        in this map for a package, and they will be honored.
NOTE: This method has to be called before a package is resolved by
modulegraph.
Parameters
----------
        package_name : str
            Fully-qualified name of the package whose '__path__' is to be
            extended.
directory : str
Absolute or relative path of the directory to append to the
'__path__' attribute.
"""
paths = self._package_path_map.setdefault(package_name, [])
paths.append(directory)
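    # A minimal usage sketch with hypothetical names: if the package "mypkg"
    # extends its __path__ at runtime (e.g., via pkgutil.extend_path), the
    # extra directory can be registered before "mypkg" is resolved:
    #
    #     graph.append_package_path('mypkg', '/opt/plugins/mypkg')
    #
    # The registered directory is later appended to the package's
    # "packagepath" when _load_package() consults "_package_path_map".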
def _safe_import_module(
self, module_partname, module_name, parent_module):
"""
Create a new graph node for the module with the passed name under the
parent package signified by the passed graph node _without_ raising
`ImportError` exceptions.
If this module has already been imported, this module's existing graph
node will be returned; else if this module is importable, a new graph
node will be added for this module and returned; else this module is
unimportable, in which case `None` will be returned. Like the
`_safe_import_hook()` method, this method does _not_ raise
`ImportError` exceptions when this module is unimportable.
Parameters
----------
module_partname : str
Unqualified name of the module to be imported (e.g., `text`).
module_name : str
Fully-qualified name of this module (e.g., `email.mime.text`).
parent_module : Package
Graph node of the previously imported parent module containing this
submodule _or_ `None` if this is a top-level module (i.e.,
`module_name` contains no `.` delimiters). This parent module is
typically but _not_ always a package (e.g., the `os.path` submodule
contained by the `os` module).
Returns
----------
Node
Graph node created for this module _or_ `None` if this module is
unimportable.
"""
self.msgin(3, "safe_import_module", module_partname, module_name, parent_module)
# If this module has *NOT* already been imported, do so.
module = self.findNode(module_name)
if module is None:
# List of the absolute paths of all directories to be searched for
# this module. This effectively defaults to "sys.path".
search_dirs = None
# Open file handle providing the physical contents of this module.
file_handle = None
# If this module has a parent package...
if parent_module is not None:
# ...with a list of the absolute paths of all directories
# comprising this package, prefer that to "sys.path".
if parent_module.packagepath is not None:
search_dirs = parent_module.packagepath
# Else, something is horribly wrong. Return emptiness.
else:
self.msgout(3, "safe_import_module -> None (parent_parent.packagepath is None)")
return None
try:
try:
file_handle, pathname, metadata = self._find_module(
module_partname, search_dirs, parent_module)
except ImportError as exc:
self.msgout(3, "safe_import_module -> None (%r)" % exc)
return None
module = self._load_module(
module_name, file_handle, pathname, metadata)
finally:
if file_handle is not None:
file_handle.close()
# If this is a submodule rather than top-level module...
if parent_module is not None:
self.msg(4, "safe_import_module create reference", module, "->", parent_module)
# Add an edge from this submodule to its parent module.
self._updateReference(
module, parent_module, edge_data=DependencyInfo(
conditional=False,
fromlist=False,
function=False,
tryexcept=False,
))
# Add this submodule to its parent module.
parent_module.add_submodule(module_partname, module)
# Return this module.
self.msgout(3, "safe_import_module ->", module)
return module
#FIXME: For clarity, rename method parameters to:
# def _load_module(self, module_name, file_handle, pathname, imp_info):
def _load_module(self, fqname, fp, pathname, info):
suffix, mode, typ = info
self.msgin(2, "load_module", fqname, fp and "fp", pathname)
if typ == imp.PKG_DIRECTORY:
if isinstance(mode, (list, tuple)):
packagepath = mode
else:
packagepath = []
m = self._load_package(fqname, pathname, packagepath)
self.msgout(2, "load_module ->", m)
return m
if typ == imp.PY_SOURCE:
contents = fp.read()
if isinstance(contents, bytes):
contents += b'\n'
else:
contents += '\n'
try:
co = compile(contents, pathname, 'exec', ast.PyCF_ONLY_AST, True)
if sys.version_info[:2] == (3, 5):
# In Python 3.5 some syntax problems with async
# functions are only reported when compiling to bytecode
compile(co, '-', 'exec', 0, True)
except SyntaxError:
co = None
cls = InvalidSourceModule
self.msg(2, "load_module: InvalidSourceModule", pathname)
else:
cls = SourceModule
elif typ == imp.PY_COMPILED:
data = fp.read(4)
magic = imp.get_magic()
if data != magic:
self.msg(2, "load_module: InvalidCompiledModule, "
"bad magic number", pathname, data, magic)
co = None
cls = InvalidCompiledModule
else:
if sys.version_info >= (3, 7):
fp.read(12)
elif sys.version_info >= (3, 4):
fp.read(8)
else:
fp.read(4)
try:
co = marshal.loads(fp.read())
cls = CompiledModule
except Exception as exc:
self.msg(2, "load_module: InvalidCompiledModule, "
"Cannot load code", pathname, exc)
co = None
cls = InvalidCompiledModule
elif typ == imp.C_BUILTIN:
cls = BuiltinModule
co = None
else:
cls = Extension
co = None
m = self.createNode(cls, fqname)
m.filename = pathname
if co is not None:
try:
if isinstance(co, ast.AST):
co_ast = co
co = compile(co_ast, pathname, 'exec', 0, True)
else:
co_ast = None
self._scan_code(m, co, co_ast)
if self.replace_paths:
co = self._replace_paths_in_code(co)
m.code = co
except SyntaxError:
self.msg(1, "load_module: SyntaxError in ", pathname)
cls = InvalidSourceModule
m = self.createNode(cls, fqname)
self.msgout(2, "load_module ->", m)
return m
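    # A schematic summary of the dispatch performed by _load_module() above,
    # keyed on the "typ" element of the metadata 3-tuple:
    #
    #     imp.PKG_DIRECTORY -> Package or NamespacePackage (via
    #                          _load_package())
    #     imp.PY_SOURCE     -> SourceModule, or InvalidSourceModule on a
    #                          SyntaxError
    #     imp.PY_COMPILED   -> CompiledModule, or InvalidCompiledModule on a
    #                          bad magic number or unmarshallable bytecode
    #     imp.C_BUILTIN     -> BuiltinModule
    #     anything else     -> Extension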
def _safe_import_hook(
self, target_module_partname, source_module, target_attr_names,
level=DEFAULT_IMPORT_LEVEL, edge_attr=None):
"""
Import the module with the passed name and all parent packages of this
module from the previously imported caller module signified by the
passed graph node _without_ raising `ImportError` exceptions.
        This method wraps the lower-level `import_hook()` method. On catching
an `ImportError` exception raised by that method, this method creates
and adds a `MissingNode` instance describing the unimportable module to
the graph instead.
Parameters
----------
target_module_partname : str
Partially-qualified name of the module to be imported. If `level`
is:
* `ABSOLUTE_OR_RELATIVE_IMPORT_LEVEL` (e.g., the Python 2 default)
or a positive integer (e.g., an explicit relative import), the
fully-qualified name of this module is the concatenation of the
fully-qualified name of the caller module's package and this
parameter.
* `ABSOLUTE_IMPORT_LEVEL` (e.g., the Python 3 default), this name
is already fully-qualified.
            * A positive integer (e.g., `1`), this name is typically the
empty string. In this case, this is a "from"-style relative
import (e.g., "from . import bar") and the fully-qualified name
of this module is dynamically resolved by import machinery.
source_module : Node
Graph node for the previously imported **caller module** (i.e.,
module containing the `import` statement triggering the call to
this method) _or_ `None` if this module is to be imported in a
"disconnected" manner. **Passing `None` is _not_ recommended.**
Doing so produces a disconnected graph in which the graph node
created for the module to be imported will be disconnected and
hence unreachable from all other nodes -- which frequently causes
subtle issues in external callers (e.g., PyInstaller, which
silently ignores unreachable nodes).
target_attr_names : list
List of the unqualified names of all submodules and attributes to
be imported via a `from`-style import statement from this target
module if any (e.g., the list `[encode_base64, encode_noop]` for
the import `from email.encoders import encode_base64, encode_noop`)
            _or_ `None` otherwise. Ignored unless the target module's graph
            node is a package (i.e., is an instance of the `Package` class).
Why? Because:
* Consistency. The `_import_importable_package_submodules()`
method accepts a similar list applicable only to packages.
* Efficiency. Unlike packages, modules cannot physically contain
submodules. Hence, any target module imported via a `from`-style
import statement as an attribute from another target parent
module must itself have been imported in that target parent
module. The import statement responsible for that import must
already have been previously parsed by `ModuleGraph`, in which
case that target module will already be frozen by PyInstaller.
These imports are safely ignorable here.
level : int
Whether to perform an absolute or relative import. This parameter
corresponds exactly to the parameter of the same name accepted by
the `__import__()` built-in: "The default is -1 which indicates
both absolute and relative imports will be attempted. 0 means only
perform absolute imports. Positive values for level indicate the
number of parent directories to search relative to the directory of
the module calling `__import__()`." Defaults to -1 under Python 2
and 0 under Python 3. Since this default depends on the major
version of the current Python interpreter, depending on this
default can result in unpredictable and non-portable behaviour.
Callers are strongly recommended to explicitly pass this parameter
rather than implicitly accept this default.
Returns
----------
list
List of the graph nodes created for all modules explicitly imported
by this call, including the passed module and all submodules listed
in `target_attr_names` _but_ excluding all parent packages
implicitly imported by this call. If `target_attr_names` is either
`None` or the empty list, this is guaranteed to be a list of one
element: the graph node created for the passed module. As above,
`MissingNode` instances are created for all unimportable modules.
"""
self.msg(3, "_safe_import_hook", target_module_partname, source_module, target_attr_names, level)
def is_swig_candidate():
return (source_module is not None and
target_attr_names is None and
level == ABSOLUTE_IMPORT_LEVEL and
type(source_module) is SourceModule and
target_module_partname ==
'_' + source_module.identifier.rpartition('.')[2] and
sys.version_info[0] == 3)
def is_swig_wrapper(source_module):
# TODO Define a new function util.open_text_file() performing
# this logic, which is repeated numerous times in this module.
# FIXME: Actually, can't we just use the new compat.open()
# function to reliably open text files in a portable manner?
with open(source_module.filename, 'rb') as source_module_file:
encoding = util.guess_encoding(source_module_file)
with open(source_module.filename, _READ_MODE, encoding=encoding) \
as source_module_file:
first_line = source_module_file.readline()
            self.msg(5, 'SWIG wrapper candidate first line: %r' % (first_line,))
return "automatically generated by SWIG" in first_line
# List of the graph nodes created for all target modules both
# imported by and returned from this call, whose:
#
# * First element is the graph node for the core target module
# specified by the "target_module_partname" parameter.
# * Remaining elements are the graph nodes for all target submodules
# specified by the "target_attr_names" parameter.
target_modules = None
# True if this is a Python 2-style implicit relative import of a
# SWIG-generated C extension. False if we checked and it is not SWIG.
# None if we haven't checked yet.
is_swig_import = None
# Attempt to import this target module in the customary way.
try:
target_modules = self.import_hook(
target_module_partname, source_module,
target_attr_names=None, level=level, edge_attr=edge_attr)
# Failing that, defer to custom module importers handling non-standard
# import schemes (namely, SWIG).
except InvalidRelativeImportError:
self.msgout(2, "Invalid relative import", level,
target_module_partname, target_attr_names)
result = []
for sub in target_attr_names or '*':
m = self.createNode(InvalidRelativeImport,
'.' * level + target_module_partname, sub)
self._updateReference(source_module, m, edge_data=edge_attr)
result.append(m)
return result
except ImportError as msg:
# If this is an absolute top-level import under Python 3 and if the
# name to be imported is the caller's name prefixed by "_", this
# could be a SWIG-generated Python 2-style implicit relative import.
# SWIG-generated files contain functions named swig_import_helper()
# importing dynamic libraries residing in the same directory. For
# example, a SWIG-generated caller module "csr.py" might resemble:
#
# # This file was automatically generated by SWIG (http://www.swig.org).
# ...
# def swig_import_helper():
# ...
# try:
# fp, pathname, description = imp.find_module('_csr',
# [dirname(__file__)])
# except ImportError:
# import _csr
# return _csr
#
# While there exists no reasonable means for modulegraph to parse
# the call to imp.find_module(), the subsequent implicit relative
# import is trivially parsable. This import is prohibited under
# Python 3, however, and thus parsed only if the caller's file is
# parsable plaintext (as indicated by a filetype of ".py") and the
# first line of this file is the above SWIG header comment.
#
# The constraint that this library's name be the caller's name
# prefixed by '_' is explicitly mandated by SWIG and thus a
# reliable indicator of "SWIG-ness". The SWIG documentation states:
# "When linking the module, the name of the output file has to match
# the name of the module prefixed by an underscore."
#
# Only source modules (e.g., ".py"-suffixed files) are SWIG import
# candidates. All other node types are safely ignorable.
if is_swig_candidate():
self.msg(
4,
'SWIG import candidate (name=%r, caller=%r, level=%r)' % (
target_module_partname, source_module, level))
is_swig_import = is_swig_wrapper(source_module)
if is_swig_import:
# Convert this Python 2-compliant implicit relative
# import prohibited by Python 3 into a Python
# 3-compliant explicit relative "from"-style import for
# the duration of this function call by overwriting the
# original parameters passed to this call.
target_attr_names = [target_module_partname]
target_module_partname = ''
level = 1
self.msg(2,
'SWIG import (caller=%r, fromlist=%r, level=%r)'
% (source_module, target_attr_names, level))
# Import this target SWIG C extension's package.
try:
target_modules = self.import_hook(
target_module_partname, source_module,
target_attr_names=None,
level=level,
edge_attr=edge_attr)
except ImportError as msg:
self.msg(2, "SWIG ImportError:", str(msg))
# If this module remains unimportable...
if target_modules is None:
self.msg(2, "ImportError:", str(msg))
# Add this module as a MissingModule node.
target_module = self.createNode(
MissingModule,
_path_from_importerror(msg, target_module_partname))
self._updateReference(
source_module, target_module, edge_data=edge_attr)
# Initialize this list to this node.
target_modules = [target_module]
# Ensure that the above logic imported exactly one target module.
        assert len(target_modules) == 1, (
            'Expected import_hook() to '
            'return only one module but received: {}'.format(target_modules))
# Target module imported above.
target_module = target_modules[0]
if isinstance(target_module, MissingModule) \
and is_swig_import is None and is_swig_candidate() \
and is_swig_wrapper(source_module):
# if this possible swig C module was previously imported from
# a python module other than its corresponding swig python
# module, then it may have been considered a MissingModule.
# Try to reimport it now. For details see pull-request #2578
# and issue #1522.
#
            # If this module was taken as a SWIG candidate above but failed
# to import, this would be a MissingModule, too. Thus check if
# this was the case (is_swig_import would be not None) to avoid
# recursion error. If `is_swig_import` is None and we are still a
# swig candidate then that means we haven't properly imported this
# swig module yet so do that below.
#
# Remove the MissingModule node from the graph so that we can
# attempt a reimport and avoid collisions. This node should be
# fine to remove because the proper module will be imported and
# added to the graph in the next line (call to _safe_import_hook).
self.removeNode(target_module)
# Reimport the SWIG C module relative to the wrapper
target_modules = self._safe_import_hook(
target_module_partname, source_module,
target_attr_names=None, level=1, edge_attr=edge_attr)
# return the output regardless because it would just be
# duplicating the processing below
return target_modules
if isinstance(edge_attr, DependencyInfo):
edge_attr = edge_attr._replace(fromlist=True)
# If this is a "from"-style import *AND* this target module is a
# package, import all attributes listed by the "import" clause of this
# import that are submodules of this package. If this target module is
# *NOT* a package, these attributes are always ignorable globals (e.g.,
# classes, variables) defined at the top level of this module.
#
# If this target module is a non-package, it could still contain
# importable submodules (e.g., the non-package `os` module containing
# the `os.path` submodule). In this case, these submodules are already
# imported by this target module's pure-Python code. Since our import
# scanner already detects these imports, these submodules need *NOT* be
# reimported here. (Doing so would be harmless but inefficient.)
if target_attr_names and isinstance(target_module, Package):
# For the name of each attribute imported from this target package
# into this source module...
for target_submodule_partname in target_attr_names:
#FIXME: Is this optimization *REALLY* an optimization or at all
#necessary? The findNode() method called below should already
#be heavily optimized, in which case this optimization here is
#premature, senseless, and should be eliminated.
# If this attribute is a previously imported submodule of this
# target module, optimize this edge case.
if target_module.is_submodule(target_submodule_partname):
# Graph node for this submodule.
target_submodule = target_module.get_submodule(
target_submodule_partname)
#FIXME: What? Shouldn't "target_submodule" *ALWAYS* be
#non-None here? Assert this to be non-None instead.
if target_submodule is not None:
#FIXME: Why does duplication matter? List searches are
#mildly expensive.
# If this submodule has not already been added to the
# list of submodules to be returned, do so.
if target_submodule not in target_modules:
self._updateReference(
source_module,
target_submodule,
edge_data=edge_attr)
target_modules.append(target_submodule)
continue
# Fully-qualified name of this submodule.
target_submodule_name = (
target_module.identifier + '.' + target_submodule_partname)
# Graph node of this submodule if previously imported or None.
target_submodule = self.findNode(target_submodule_name)
# If this submodule has not been imported, do so as if this
# submodule were the only attribute listed by the "import"
# clause of this import (e.g., as "from foo import bar" rather
# than "from foo import car, far, bar").
if target_submodule is None:
# Attempt to import this submodule.
try:
# Ignore the list of graph nodes returned by this
# method. If both this submodule's package and this
# submodule are importable, this method returns a
# 2-element list whose second element is this
# submodule's graph node. However, if this submodule's
# package is importable but this submodule is not,
# this submodule is either:
#
# * An ignorable global attribute defined at the top
# level of this package's "__init__" submodule. In
# this case, this method returns a 1-element list
# without raising an exception.
# * A non-ignorable unimportable submodule. In this
# case, this method raises an "ImportError".
#
# While the first two cases are disambiguatable by the
# length of this list, doing so would render this code
# dependent on import_hook() details subject to change.
                    # Instead, call findNode() to determine whether this
                    # submodule was actually imported.
self.import_hook(
target_module_partname, source_module,
target_attr_names=[target_submodule_partname],
level=level,
edge_attr=edge_attr)
# Graph node of this submodule imported by the prior
# call if importable or None otherwise.
target_submodule = self.findNode(target_submodule_name)
# If this submodule does not exist, this *MUST* be an
# ignorable global attribute defined at the top level
# of this package's "__init__" submodule.
if target_submodule is None:
# Assert this to actually be the case.
assert target_module.is_global_attr(
target_submodule_partname), (
'No global named {} in {}.__init__'.format(
target_submodule_partname,
target_module.identifier))
# Skip this safely ignorable importation to the
# next attribute. See similar logic in the body of
# _import_importable_package_submodules().
self.msg(4, '_safe_import_hook', 'ignoring imported non-module global', target_module.identifier, target_submodule_partname)
continue
# If this is a SWIG C extension, instruct PyInstaller
# to freeze this extension under its unqualified rather
# than qualified name (e.g., as "_csr" rather than
# "scipy.sparse.sparsetools._csr"), permitting the
# implicit relative import in its parent SWIG module to
# successfully find this extension.
if is_swig_import:
# If a graph node with this name already exists,
# avoid collisions by emitting an error instead.
if self.findNode(target_submodule_partname):
self.msg(
2,
'SWIG import error: %r basename %r '
'already exists' % (
target_submodule_name,
target_submodule_partname))
else:
self.msg(
4,
'SWIG import renamed from %r to %r' % (
target_submodule_name,
target_submodule_partname))
target_submodule.identifier = (
target_submodule_partname)
# If this submodule is unimportable, add a MissingModule.
except ImportError as msg:
self.msg(2, "ImportError:", str(msg))
target_submodule = self.createNode(
MissingModule, target_submodule_name)
# Add this submodule to its package.
target_module.add_submodule(
target_submodule_partname, target_submodule)
if target_submodule is not None:
self._updateReference(
target_module, target_submodule, edge_data=edge_attr)
self._updateReference(
source_module, target_submodule, edge_data=edge_attr)
if target_submodule not in target_modules:
target_modules.append(target_submodule)
# Return the list of all target modules imported by this call.
return target_modules
def _scan_code(
self,
module,
module_code_object,
module_code_object_ast=None):
"""
Parse and add all import statements from the passed code object of the
passed source module to this graph, recursively.
**This method is at the root of all `ModuleGraph` recursion.**
Recursion begins here and ends when all import statements in all code
objects of all modules transitively imported by the source module
passed to the first call to this method have been added to the graph.
Specifically, this method:
1. If the passed `module_code_object_ast` parameter is non-`None`,
parses all import statements from this object.
2. Else, parses all import statements from the passed
`module_code_object` parameter.
        3. For each such import statement:
           1. Adds to this `ModuleGraph` instance:
              1. Nodes for all target modules of these imports.
              2. Directed edges from this source module to these target
                 modules.
           2. Recursively calls this method with these target modules.
Parameters
----------
module : Node
Graph node of the module to be parsed.
module_code_object : PyCodeObject
Code object providing this module's disassembled Python bytecode.
Ignored unless `module_code_object_ast` is `None`.
module_code_object_ast : optional[ast.AST]
Optional abstract syntax tree (AST) of this module if any or `None`
otherwise. Defaults to `None`, in which case the passed
`module_code_object` is parsed instead.
"""
# For safety, guard against multiple scans of the same module by
# resetting this module's list of deferred target imports. While
# uncommon, this edge case can occur due to:
#
# * Dynamic package replacement via the replacePackage() function. For
# example, the real "_xmlplus" package dynamically replaces itself
# with the fake "xml" package into the "sys.modules" cache of all
# currently loaded modules at runtime.
module._deferred_imports = []
# Parse all imports from this module *BEFORE* adding these imports to
# the graph. If an AST is provided, parse that rather than this
# module's code object.
if module_code_object_ast is not None:
# Parse this module's AST for imports.
self._scan_ast(module, module_code_object_ast)
# Parse this module's code object for all relevant non-imports
# (e.g., global variable declarations and undeclarations).
self._scan_bytecode(
module, module_code_object, is_scanning_imports=False)
# Else, parse this module's code object for imports.
else:
self._scan_bytecode(
module, module_code_object, is_scanning_imports=True)
# Add all imports parsed above to this graph.
self._process_imports(module)
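    # The scan-then-process split above matters: all imports of a module are
    # first collected into "_deferred_imports" and only then graphed by
    # _process_imports(). A sketch of the resulting call pattern for a
    # hypothetical module "m" importing "a" and "b":
    #
    #     _scan_code(m, ...)                 # fills m._deferred_imports
    #         _process_imports(m)
    #             _safe_import_hook('a', m)  # may recurse into _scan_code(a)
    #             _safe_import_hook('b', m)  # may recurse into _scan_code(b)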
def _scan_ast(self, module, module_code_object_ast):
"""
Parse and add all import statements from the passed abstract syntax
tree (AST) of the passed source module to this graph, non-recursively.
Parameters
----------
module : Node
Graph node of the module to be parsed.
module_code_object_ast : ast.AST
Abstract syntax tree (AST) of this module to be parsed.
"""
visitor = _Visitor(self, module)
visitor.visit(module_code_object_ast)
#FIXME: Optimize. Global attributes added by this method are tested by
#other methods *ONLY* for packages, implying this method should scan and
#handle opcodes pertaining to global attributes (e.g.,
#"STORE_NAME", "DELETE_GLOBAL") only if the passed "module"
#object is an instance of the "Package" class. For all other module types,
#these opcodes should simply be ignored.
#
#After doing so, the "Node._global_attr_names" attribute and all methods
#using this attribute (e.g., Node.is_global()) should be moved from the
#"Node" superclass to the "Package" subclass.
def _scan_bytecode(
self, module, module_code_object, is_scanning_imports):
"""
Parse and add all import statements from the passed code object of the
passed source module to this graph, non-recursively.
This method parses all reasonably parsable operations (i.e., operations
that are both syntactically and semantically parsable _without_
requiring Turing-complete interpretation) directly or indirectly
involving module importation from this code object. This includes:
* `IMPORT_NAME`, denoting an import statement. Ignored unless
the passed `is_scanning_imports` parameter is `True`.
* `STORE_NAME` and `STORE_GLOBAL`, denoting the
declaration of a global attribute (e.g., class, variable) in this
module. This method stores each such declaration for subsequent
lookup. While global attributes are usually irrelevant to import
parsing, they remain the only means of distinguishing erroneous
non-ignorable attempts to import non-existent submodules of a package
from successful ignorable attempts to import existing global
attributes of a package's `__init__` submodule (e.g., the `bar` in
`from foo import bar`, which is either a non-ignorable submodule of
`foo` or an ignorable global attribute of `foo.__init__`).
* `DELETE_NAME` and `DELETE_GLOBAL`, denoting the
undeclaration of a previously declared global attribute in this
module.
Since `ModuleGraph` is _not_ intended to replicate the behaviour of a
full-featured Turing-complete Python interpreter, this method ignores
operations that are _not_ reasonably parsable from this code object --
even those directly or indirectly involving module importation. This
includes:
* `STORE_ATTR(namei)`, implementing `TOS.name = TOS1`. If `TOS` is the
name of a target module currently imported into the namespace of the
passed source module, this opcode would ideally be parsed to add that
global attribute to that target module. Since this addition only
conditionally occurs on the importation of this source module and
execution of the code branch in this module performing this addition,
however, that global _cannot_ be unconditionally added to that target
module. In short, only Turing-complete behaviour suffices.
* `DELETE_ATTR(namei)`, implementing `del TOS.name`. If `TOS` is the
name of a target module currently imported into the namespace of the
passed source module, this opcode would ideally be parsed to remove
that global attribute from that target module. Again, however, only
Turing-complete behaviour suffices.
Parameters
----------
module : Node
Graph node of the module to be parsed.
module_code_object : PyCodeObject
Code object of the module to be parsed.
is_scanning_imports : bool
`True` only if this method is parsing import statements from
`IMPORT_NAME` opcodes. If `False`, no import statements will be
parsed. This parameter is typically:
* `True` when parsing this module's code object for such imports.
* `False` when parsing this module's abstract syntax tree (AST)
(rather than code object) for such imports. In this case, that
parsing will have already parsed import statements, which this
parsing must avoid repeating.
"""
level = None
fromlist = None
        # 'deque' is a list-like container with fast appends and pops on
        # either end; with "maxlen" set, it automatically discards the
        # oldest elements once full.
prev_insts = deque(maxlen=2)
for inst in util.iterate_instructions(module_code_object):
if not inst:
continue
# If this is an import statement originating from this module,
# parse this import.
#
# Note that the related "IMPORT_FROM" opcode need *NOT* be parsed.
# "IMPORT_NAME" suffices. For further details, see
# http://probablyprogramming.com/2008/04/14/python-import_name
if inst.opname == 'IMPORT_NAME':
# If this method is ignoring import statements, skip to the
# next opcode.
if not is_scanning_imports:
continue
assert prev_insts[-2].opname == 'LOAD_CONST'
assert prev_insts[-1].opname == 'LOAD_CONST'
                # Python >=2.5: LOAD_CONST level, LOAD_CONST fromlist, IMPORT_NAME name
level = prev_insts[-2].argval
fromlist = prev_insts[-1].argval
assert fromlist is None or type(fromlist) is tuple
target_module_partname = inst.argval
#FIXME: The exact same logic appears in _collect_import(),
#which isn't particularly helpful. Instead, defer this logic
#until later by:
#
#* Refactor the "_deferred_imports" list to contain 2-tuples
# "(_safe_import_hook_args, _safe_import_hook_kwargs)" rather
# than 3-tuples "(have_star, _safe_import_hook_args,
# _safe_import_hook_kwargs)".
#* Stop prepending these tuples by a "have_star" boolean both
# here, in _collect_import(), and in _process_imports().
#* Shift the logic below to _process_imports().
#* Remove the same logic from _collect_import().
have_star = False
if fromlist is not None:
fromlist = uniq(fromlist)
if '*' in fromlist:
fromlist.remove('*')
have_star = True
# Record this import as originating from this module for
# subsequent handling by the _process_imports() method.
module._deferred_imports.append((
have_star,
(target_module_partname, module, fromlist, level),
{}
))
elif inst.opname in ('STORE_NAME', 'STORE_GLOBAL'):
# If this is the declaration of a global attribute (e.g.,
# class, variable) in this module, store this declaration for
# subsequent lookup. See method docstring for further details.
#
# Global attributes are usually irrelevant to import parsing, but
# remain the only means of distinguishing erroneous non-ignorable
# attempts to import non-existent submodules of a package from
# successful ignorable attempts to import existing global
# attributes of a package's "__init__" submodule (e.g., the "bar"
# in "from foo import bar", which is either a non-ignorable
# submodule of "foo" or an ignorable global attribute of
# "foo.__init__").
name = inst.argval
module.add_global_attr(name)
elif inst.opname in ('DELETE_NAME', 'DELETE_GLOBAL'):
# If this is the undeclaration of a previously declared global
# attribute (e.g., class, variable) in this module, remove that
# declaration to prevent subsequent lookup. See method docstring
# for further details.
name = inst.argval
module.remove_global_attr_if_found(name)
prev_insts.append(inst)
def _process_imports(self, source_module):
"""
Graph all target modules whose importations were previously parsed from
the passed source module by a prior call to the `_scan_code()` method
        and methods called by that method (e.g., `_scan_ast()` and
        `_scan_bytecode()`).
Parameters
----------
source_module : Node
Graph node of the source module to graph target imports for.
"""
# If this source module imported no target modules, noop.
if not source_module._deferred_imports:
return
# For each target module imported by this source module...
for have_star, import_info, kwargs in source_module._deferred_imports:
# Graph node of the target module specified by the "from" portion
# of this "from"-style star import (e.g., an import resembling
# "from {target_module_name} import *") or ignored otherwise.
target_module = self._safe_import_hook(*import_info, **kwargs)[0]
# If this is a "from"-style star import, process this import.
if have_star:
#FIXME: Sadly, the current approach to importing attributes
#from "from"-style star imports is... simplistic. This should
#be revised as follows. If this target module is:
#
#* A package:
# * Whose "__init__" submodule defines the "__all__" global
# attribute, only attributes listed by this attribute should
# be imported.
# * Else, *NO* attributes should be imported.
#* A non-package:
# * Defining the "__all__" global attribute, only attributes
# listed by this attribute should be imported.
# * Else, only public attributes whose names are *NOT*
# prefixed by "_" should be imported.
source_module.add_global_attrs_from_module(target_module)
source_module._starimported_ignored_module_names.update(
target_module._starimported_ignored_module_names)
# If this target module has no code object and hence is
# unparsable, record its name for posterity.
if target_module.code is None:
target_module_name = import_info[0]
source_module._starimported_ignored_module_names.add(
target_module_name)
# For safety, prevent these imports from being reprocessed.
source_module._deferred_imports = None
def _load_package(self, fqname, pathname, pkgpath):
"""
Called only when an imp.PKG_DIRECTORY is found
"""
self.msgin(2, "load_package", fqname, pathname, pkgpath)
newname = _replacePackageMap.get(fqname)
if newname:
fqname = newname
ns_pkgpath = _namespace_package_path(fqname, pkgpath or [], self.path)
if ns_pkgpath or pkgpath:
# this is a namespace package
m = self.createNode(NamespacePackage, fqname)
m.filename = '-'
m.packagepath = ns_pkgpath
else:
m = self.createNode(Package, fqname)
m.filename = pathname
# PEP-302-compliant loaders return the pathname of the
            # `__init__` file, not the package directory.
if os.path.basename(pathname).startswith('__init__.'):
pathname = os.path.dirname(pathname)
m.packagepath = [pathname] + ns_pkgpath
# As per comment at top of file, simulate runtime packagepath additions.
m.packagepath = m.packagepath + self._package_path_map.get(fqname, [])
try:
self.msg(2, "find __init__ for %s"%(m.packagepath,))
fp, buf, stuff = self._find_module("__init__", m.packagepath, parent=m)
except ImportError:
pass
else:
try:
self.msg(2, "load __init__ for %s"%(m.packagepath,))
self._load_module(fqname, fp, buf, stuff)
finally:
if fp is not None:
fp.close()
self.msgout(2, "load_package ->", m)
return m
def _find_module(self, name, path, parent=None):
"""
3-tuple describing the physical location of the module with the passed
name if this module is physically findable _or_ raise `ImportError`.
This high-level method wraps the low-level `modulegraph.find_module()`
function with additional support for graph-based module caching.
Parameters
----------
name : str
Fully-qualified name of the Python module to be found.
path : list
List of the absolute paths of all directories to search for this
module _or_ `None` if the default path list `self.path` is to be
searched.
parent : Node
Package containing this module if this module is a submodule of a
package _or_ `None` if this is a top-level module.
Returns
----------
(file_handle, filename, metadata)
See `modulegraph._find_module()` for details.
Raises
----------
ImportError
If this module is _not_ found.
"""
if parent is not None:
# assert path is not None
fullname = parent.identifier + '.' + name
else:
fullname = name
node = self.findNode(fullname)
if node is not None:
self.msg(3, "find_module: already included?", node)
raise ImportError(name)
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
path = self.path
return self._find_module_path(fullname, name, path)
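    # A usage sketch with hypothetical names: locating the "path" submodule
    # of an already-imported "os" package node might resemble:
    #
    #     fp, pathname, (suffix, mode, typ) = graph._find_module(
    #         'path', os_node.packagepath, parent=os_node)
    #
    # ImportError is raised both when the module cannot be located and when
    # a node named "os.path" already exists in the graph (the caching
    # behaviour implemented above).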
def _find_module_path(self, fullname, module_name, search_dirs):
"""
3-tuple describing the physical location of the module with the passed
name if this module is physically findable _or_ raise `ImportError`.
This low-level function is a variant on the standard `imp.find_module()`
function with additional support for:
* Multiple search paths. The passed list of absolute paths will be
iteratively searched for the first directory containing a file
corresponding to this module.
* Compressed (e.g., zipped) packages.
For efficiency, the higher level `ModuleGraph._find_module()` method
wraps this function with support for module caching.
Parameters
----------
        fullname : str
            Fully-qualified name of the module to be found.
        module_name : str
            Unqualified name of the module to be found.
search_dirs : list
List of the absolute paths of all directories to search for this
module (in order). Searching will halt at the first directory
containing this module.
Returns
----------
(file_handle, filename, metadata)
3-tuple describing the physical location of this module, where:
* `file_handle` is an open read-only file handle from which the
on-disk contents of this module may be read if this is a
pure-Python module or `None` otherwise (e.g., if this is a
package or C extension).
* `filename` is the absolute path of this file.
* `metadata` is itself a 3-tuple `(filetype, open_mode, imp_type)`.
See the `_IMPORTABLE_FILETYPE_TO_METADATA` dictionary for
details.
Raises
----------
ImportError
If this module is _not_ found.
"""
self.msgin(4, "_find_module_path <-", fullname, search_dirs)
# TODO: Under:
#
# * Python 3.3, the following logic should be replaced by logic
# leveraging only the "importlib" module.
# * Python 3.4, the following logic should be replaced by a call to
# importlib.util.find_spec().
# Top-level 3-tuple to be returned.
path_data = None
# File handle to be returned.
file_handle = None
# List of the absolute paths of all directories comprising the
# namespace package to which this module belongs if any.
namespace_dirs = []
try:
for search_dir in search_dirs:
# PEP 302-compliant importer making loaders for this directory.
importer = pkgutil.get_importer(search_dir)
# If this directory is not importable, continue.
if importer is None:
# self.msg(4, "_find_module_path importer not found", search_dir)
continue
# Get the PEP 302-compliant loader object loading this module.
#
# If this importer defines the PEP 302-compliant find_loader()
# method, prefer that.
if hasattr(importer, 'find_loader'):
loader, loader_namespace_dirs = importer.find_loader(
module_name)
namespace_dirs.extend(loader_namespace_dirs)
# Else if this importer defines the Python 2-specific
# find_module() method, fall back to that. Despite the method
# name, this method returns a loader rather than a module.
elif hasattr(importer, 'find_module'):
loader = importer.find_module(module_name)
# Else, raise an exception.
else:
raise ImportError(
"Module %r importer %r loader unobtainable" % (module_name, importer))
# If this module is not loadable from this directory, continue.
if loader is None:
# self.msg(4, "_find_module_path loader not found", search_dir)
continue
# 3-tuple of metadata to be returned.
metadata = None
# Absolute path of this module. If this module resides in a
# compressed archive, this is the absolute path of this module
# after extracting this module from that archive and hence
# should not exist; else, this path should typically exist.
pathname = None
# If this loader defines the PEP 302-compliant get_filename()
# method, preferably call that method first. Most if not all
# loaders (including zipimporter objects) define this method.
if hasattr(loader, 'get_filename'):
pathname = loader.get_filename(module_name)
# Else if this loader provides a "path" attribute, defer to that.
elif hasattr(loader, 'path'):
pathname = loader.path
# Else, raise an exception.
else:
raise ImportError(
"Module %r loader %r path unobtainable" % (module_name, loader))
# If no path was found, this is probably a namespace package. In
# such case, continue collecting namespace directories.
if pathname is None:
self.msg(4, "_find_module_path path not found", pathname)
continue
# If this loader defines the PEP 302-compliant is_package()
# method returning True, this is a non-namespace package.
if hasattr(loader, 'is_package') and loader.is_package(module_name):
metadata = ('', '', imp.PKG_DIRECTORY)
# Else, this is either a module or C extension.
else:
# In either case, this path must have a filetype.
filetype = os.path.splitext(pathname)[1]
if not filetype:
raise ImportError(
'Non-package module %r path %r has no filetype' % (module_name, pathname))
# 3-tuple of metadata specific to this filetype.
metadata = _IMPORTABLE_FILETYPE_TO_METADATA.get(
filetype, None)
if metadata is None:
raise ImportError(
'Non-package module %r filetype %r unrecognized' % (pathname, filetype))
# See "_IMPORTABLE_FILETYPE_TO_METADATA" for details.
open_mode = metadata[1]
imp_type = metadata[2]
# If this is a C extension, leave this path unopened.
if imp_type == imp.C_EXTENSION:
pass
# Else, this is a module.
#
# If this loader defines the PEP 302-compliant get_source()
# method, open the returned string as a file-like buffer.
elif imp_type == imp.PY_SOURCE and hasattr(loader, 'get_source'):
file_handle = StringIO(loader.get_source(module_name))
# If this loader defines the PEP 302-compliant get_code()
# method, open the returned object as a file-like buffer.
elif imp_type == imp.PY_COMPILED and hasattr(loader, 'get_code'):
try:
code_object = loader.get_code(module_name)
if code_object is None:
file_handle = BytesIO(b'\0\0\0\0\0\0\0\0')
else:
file_handle = _code_to_file(code_object)
except ImportError:
                            # Postpone the ImportError until _load_module().
file_handle = BytesIO(b'\0\0\0\0\0\0\0\0')
# If this is an uncompiled file under Python 3, open this
# file for encoding-aware text reading.
elif imp_type == imp.PY_SOURCE and sys.version_info[0] == 3:
with open(pathname, 'rb') as file_handle:
encoding = util.guess_encoding(file_handle)
file_handle = open(
pathname, open_mode, encoding=encoding)
# Else, this is either a compiled file or an uncompiled
# file under Python 2. In either case, open this file.
else:
file_handle = open(pathname, open_mode)
# Return such metadata.
path_data = (file_handle, pathname, metadata)
break
# Else if this is a namespace package, return such metadata.
else:
if namespace_dirs:
path_data = (None, namespace_dirs[0], (
'', namespace_dirs, imp.PKG_DIRECTORY))
except UnicodeDecodeError as exc:
self.msgout(1, "_find_module_path -> unicode error", exc)
# Ensure that exceptions are logged, as this function is typically
# called by the import_module() method which squelches ImportErrors.
except Exception as exc:
self.msgout(4, "_find_module_path -> exception", exc)
raise
# If this module was not found, raise an exception.
self.msgout(4, "_find_module_path ->", path_data)
if path_data is None:
raise ImportError("No module named " + repr(module_name))
return path_data
def create_xref(self, out=None):
global header, footer, entry, contpl, contpl_linked, imports
if out is None:
out = sys.stdout
scripts = []
mods = []
for mod in self.flatten():
name = os.path.basename(mod.identifier)
if isinstance(mod, Script):
scripts.append((name, mod))
else:
mods.append((name, mod))
scripts.sort()
mods.sort()
scriptnames = [sn for sn, m in scripts]
scripts.extend(mods)
mods = scripts
title = "modulegraph cross reference for " + ', '.join(scriptnames)
print(header % {"TITLE": title}, file=out)
def sorted_namelist(mods):
lst = [os.path.basename(mod.identifier) for mod in mods if mod]
lst.sort()
return lst
for name, m in mods:
content = ""
if isinstance(m, BuiltinModule):
content = contpl % {"NAME": name,
"TYPE": "<i>(builtin module)</i>"}
elif isinstance(m, Extension):
content = contpl % {"NAME": name,
"TYPE": "<tt>%s</tt>" % m.filename}
else:
url = pathname2url(m.filename or "")
content = contpl_linked % {"NAME": name, "URL": url,
'TYPE': m.__class__.__name__}
oute, ince = map(sorted_namelist, self.get_edges(m))
if oute:
links = []
for n in oute:
links.append(""" <a href="#%s">%s</a>\n""" % (n, n))
# #8226 = bullet-point; can't use html-entities since the
# test-suite uses xml.etree.ElementTree.XMLParser, which
                # doesn't support them.
links = " • ".join(links)
content += imports % {"HEAD": "imports", "LINKS": links}
if ince:
links = []
for n in ince:
links.append(""" <a href="#%s">%s</a>\n""" % (n, n))
# #8226 = bullet-point; can't use html-entities since the
# test-suite uses xml.etree.ElementTree.XMLParser, which
                # doesn't support them.
links = " • ".join(links)
content += imports % {"HEAD": "imported by", "LINKS": links}
print(entry % {"NAME": name, "CONTENT": content}, file=out)
print(footer, file=out)
def itergraphreport(self, name='G', flatpackages=()):
# XXX: Can this be implemented using Dot()?
nodes = list(map(self.graph.describe_node, self.graph.iterdfs(self)))
describe_edge = self.graph.describe_edge
edges = deque()
packagenodes = set()
packageidents = {}
nodetoident = {}
inpackages = {}
mainedges = set()
# XXX - implement
flatpackages = dict(flatpackages)
def nodevisitor(node, data, outgoing, incoming):
if not isinstance(data, Node):
return {'label': str(node)}
#if isinstance(d, (ExcludedModule, MissingModule, BadModule)):
# return None
s = '<f0> ' + type(data).__name__
for i, v in enumerate(data.infoTuple()[:1], 1):
s += '| <f%d> %s' % (i, v)
return {'label': s, 'shape': 'record'}
def edgevisitor(edge, data, head, tail):
            # XXX: This method is nonsense; the edge
            # data is never initialized.
if data == 'orphan':
return {'style': 'dashed'}
elif data == 'pkgref':
return {'style': 'dotted'}
return {}
yield 'digraph %s {\ncharset="UTF-8";\n' % (name,)
attr = dict(rankdir='LR', concentrate='true')
cpatt = '%s="%s"'
for item in attr.items():
yield '\t%s;\n' % (cpatt % item,)
# find all packages (subgraphs)
for (node, data, outgoing, incoming) in nodes:
nodetoident[node] = getattr(data, 'identifier', None)
if isinstance(data, Package):
packageidents[data.identifier] = node
inpackages[node] = set([node])
packagenodes.add(node)
# create sets for subgraph, write out descriptions
for (node, data, outgoing, incoming) in nodes:
# update edges
for edge in (describe_edge(e) for e in outgoing):
edges.append(edge)
# describe node
yield '\t"%s" [%s];\n' % (
node,
','.join([
(cpatt % item) for item in
nodevisitor(node, data, outgoing, incoming).items()
]),
)
inside = inpackages.get(node)
if inside is None:
inside = inpackages[node] = set()
ident = nodetoident[node]
if ident is None:
continue
pkgnode = packageidents.get(ident[:ident.rfind('.')])
if pkgnode is not None:
inside.add(pkgnode)
graph = []
subgraphs = {}
for key in packagenodes:
subgraphs[key] = []
while edges:
edge, data, head, tail = edges.popleft()
if (head, tail) in mainedges:
continue
mainedges.add((head, tail))
tailpkgs = inpackages[tail]
common = inpackages[head] & tailpkgs
if not common and tailpkgs:
usepkgs = sorted(tailpkgs)
if len(usepkgs) != 1 or usepkgs[0] != tail:
edges.append((edge, data, head, usepkgs[0]))
edges.append((edge, 'pkgref', usepkgs[-1], tail))
continue
if common:
common = common.pop()
if tail == common:
edges.append((edge, data, tail, head))
elif head == common:
subgraphs[common].append((edge, 'pkgref', head, tail))
else:
edges.append((edge, data, common, head))
edges.append((edge, data, common, tail))
else:
graph.append((edge, data, head, tail))
def do_graph(edges, tabs):
edgestr = tabs + '"%s" -> "%s" [%s];\n'
# describe edge
for (edge, data, head, tail) in edges:
attribs = edgevisitor(edge, data, head, tail)
yield edgestr % (
head,
tail,
','.join([(cpatt % item) for item in attribs.items()]),
)
for g, edges in subgraphs.items():
yield '\tsubgraph "cluster_%s" {\n' % (g,)
yield '\t\tlabel="%s";\n' % (nodetoident[g],)
for s in do_graph(edges, '\t\t'):
yield s
yield '\t}\n'
for s in do_graph(graph, '\t'):
yield s
yield '}\n'
def graphreport(self, fileobj=None, flatpackages=()):
if fileobj is None:
fileobj = sys.stdout
fileobj.writelines(self.itergraphreport(flatpackages=flatpackages))
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print()
print("%-15s %-25s %s" % ("Class", "Name", "File"))
print("%-15s %-25s %s" % ("-----", "----", "----"))
for m in sorted(self.flatten(), key=lambda n: n.identifier):
print("%-15s %-25s %s" % (type(m).__name__, m.identifier, m.filename or ""))
def _replace_paths_in_code(self, co):
new_filename = original_filename = os.path.normpath(co.co_filename)
for f, r in self.replace_paths:
f = os.path.join(f, '')
r = os.path.join(r, '')
if original_filename.startswith(f):
new_filename = r + original_filename[len(f):]
break
else:
return co
consts = list(co.co_consts)
for i in range(len(consts)):
if isinstance(consts[i], type(co)):
consts[i] = self._replace_paths_in_code(consts[i])
code_func = type(co)
if hasattr(co, 'co_kwonlyargcount'):
return code_func(
co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize, co.co_flags, co.co_code,
tuple(consts), co.co_names, co.co_varnames,
new_filename, co.co_name, co.co_firstlineno,
co.co_lnotab, co.co_freevars, co.co_cellvars)
else:
return code_func(
co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, tuple(consts), co.co_names,
co.co_varnames, new_filename, co.co_name,
co.co_firstlineno, co.co_lnotab,
co.co_freevars, co.co_cellvars)
|
etherkit/OpenBeacon2
|
client/win/venv/Lib/site-packages/PyInstaller/lib/modulegraph/modulegraph.py
|
Python
|
gpl-3.0
| 143,680
|
[
"VisIt"
] |
5236b4c58f7342d264109a51fa3efd2b40ac5747969593af0e5fb4790ebdbed2
|
import numpy as np
from scipy.signal import convolve2d
def gaussian_filter1d_ppxf(spec, sig):
"""
Convolve a spectrum by a Gaussian with different sigma for every pixel.
If all sigma are the same this routine produces the same output as
scipy.ndimage.gaussian_filter1d, except for the border treatment.
Here the first/last p pixels are filled with zeros.
When creating a template library for SDSS data, this implementation
is 60x faster than a naive for loop over pixels.
:param spec: vector with the spectrum to convolve
:param sig: vector of sigma values (in pixels) for every pixel
:return: spec convolved with a Gaussian with dispersion sig
"""
sig = sig.clip(0.01) # forces zero sigmas to have 0.01 pixels
p = int(np.ceil(np.max(3*sig)))
m = 2*p + 1 # kernel size
x2 = np.linspace(-p, p, m)**2
n = spec.size
a = np.zeros((m, n))
# fig, ax = plt.subplots(1, 1, figsize=(16, 10))
for j in range(m): # Loop over the small size of the kernel
a[j, p:-p] = spec[j:n-m+j+1]
# ax.plot(waveData, a[j,:], label=j)
# ax.update({'xlabel': 'Wavelength (nm)', 'ylabel': 'Flux (normalised)'})
# ax.legend()
# plt.show()
gau = np.exp(-x2[:, None]/(2*sig**2))
gau /= np.sum(gau, 0)[None, :] # Normalize kernel
conv_spectrum = np.sum(a*gau, 0)
return conv_spectrum
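# Editor's sketch (hypothetical values, not part of the original module):
# smooth a flat spectrum with a per-pixel dispersion growing linearly
# from 0.5 to 2 pixels.
def _demo_gaussian_filter1d_ppxf():
spec = np.ones(100)
sig = np.linspace(0.5, 2.0, 100)
return gaussian_filter1d_ppxf(spec, sig)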
def gaussian_filter1d_vital(sigma, wave, flux):
# Calculate stellar broadening kernel
r_sigma = sigma / (wave[1] - wave[0])
# Kernel matrix
box = 2 #np.int64(3 * r_sigma) if np.int64(3 * r_sigma) < 3 else 3
kernel_len = 2 * box + 1
kernel = np.zeros((1, kernel_len))
kernel_range = np.arange(0, 2 * box + 1)
# Generating gaussian kernel with sigma (the norm factor is the sum of the gaussian)
kernel[0, :] = np.exp(-0.5 * ((np.square(kernel_range - box) / r_sigma)))
norm = np.sum(kernel[0, :])
kernel = kernel / norm
#Perform convolution
flux_convolved = convolve2d(flux, kernel, mode='same', boundary='symm')
p = int(np.ceil(np.max(3*sigma)))
m = 2*p + 1 # kernel size
x2 = np.linspace(-p, p, m)**2
n = flux.size
a = np.zeros((m, n))
for j in range(m): # Loop over the small size of the kernel
a[j, p:-p] = flux[j:n-m+j+1]
gau = np.exp(-x2[:, None]/(2*sigma**2))
gau /= np.sum(gau, 0)[None, :] # Normalize kernel
myKernel = np.exp(-0.5 * ((np.square(kernel_range - box) / sigma**2)))
myKernelN = myKernel / np.sum(myKernel)
flux_convolved = convolve2d(np.array([flux]), np.atleast_2d(myKernelN), mode='same', boundary='symm')
print
print 'box', box
print 'p', p
print
print 'kernel (initial)', np.zeros((1, kernel_len)).shape
print 'a (initial)', np.zeros((m, n)).shape
print
print 'kernel_len', kernel_len
print 'm', m
print
print 'kernel_range', kernel_range
print 'x2', x2
print 'np.square(kernel_range - box)', np.square(kernel_range - box)
print
print 'x2[:, None]',x2[:, None]
print 'np.square(kernel_range - box)', np.square(kernel_range - box)
print
print 'np.exp(-x2[:, None]/(2*sig**2))', np.exp(-x2[:, None]/(2*sigma**2))
print 'np.exp(-0.5 * ((np.square(kernel_range - box) / sigma**2)))', myKernel
print
print 'myKernelN', myKernelN
print 'gau_N', gau
print
print 'kernel', kernel
print 'gau', gau
print
return flux_convolved
# class Attributes_SpectrumFitter():
#
# def __init__(self):
#
# # Temperature and density grid declaration
# self.tem_grid_range = arange(temp_grid[0], temp_grid[1], temp_grid[2])
# self.den_grid_range = arange(den_grid[0], den_grid[1], den_grid[2])
# self.den_grid_range[0] = 1 #Change the minimum value for the grid to 1
#
# # Reddening parameters
# self.Rv_model = R_v
# self.reddedning_curve_model = reddenig_curve
#
# # Declare high ionization temperature ions
# self.high_temp_ions = high_temp_ions
#
# # Lower accepted limit for ssps
# self.lowlimit_sspContribution = lowlimit_sspContribution
#
# # Dictionary to store parameters
# self.conf_dic = {'Te_neb': 10000.0, 'z_neb': 0.0, 'cHbeta_neb': 0.1, 'He1_neb': 0.085, 'He2_neb': 0.0}
#
#
# # Dictionary to store the data (This is in gen_synth_obs)
# self.obj_data = {}
# self.obj_data['obj_properties_file'] = obj_properties_file
# self.obj_data['obj_lines_file'] = obj_lines_file
# self.obj_data['obj_ssp_coeffs_file'] = obj_ssp_coeffs_file
# self.obj_data['obs_mask_address'] = obj_mask_file
# self.obj_data['output_folder'] = output_folder
# self.obj_data['flux_hbeta'] = obj_prop_df.loc['flux_hbeta'][0]
#
# # Dictionary with synthetic abundances
# self.abund_dict = dict(zip(abund_keys, abund_values.T))
#
# # Import the physical data from the log
# for param in obj_prop_df.index:
# self.obj_data[param] = obj_prop_df.loc[param][0]
#
# # Reddening parameters
# self.obj_data['lineFlambda'] = self.gasExtincParams(self.obj_data['lineWaves'], self.Rv_model, self.reddedning_curve_model,Hbeta_wave = 4861.331)
#
#
# #Calculate T_high assuming we get T_low
# self.obj_data['T_high'] = TOIII_TSIII_relation(self.obj_data['T_low'])
#
# # Compute lines flux
# self.obj_data['lineFluxes'] = self.calcEmFluxes(self.obj_data['T_low'], self.obj_data['T_high'],
# obj_prop_df.loc['n_e'][0],
# obj_prop_df.loc['cHbeta'][0], obj_prop_df.loc['tau'][0],
# self.abund_dict,
# self.obj_data['lineLabes'],
# self.obj_data['lineIons'],
# self.obj_data['lineFlambda'])
#
#
# # Use general error if this is provided
# self.obj_data['lineErr'] = self.obj_data['lineFluxes'] * error_lines
#
# # Save input conditions:
# self.obj_data['wavelengh_limits'] = wavelengh_limits
# self.obj_data['resample_inc'] = resample_inc
# self.obj_data['norm_interval'] = norm_interval
# self.obj_data['z_obj'] = obj_prop_df.loc['z_obj'][0]
#
# # Rest and observed wavelength
# obj_wave_rest = np.arange(wavelengh_limits[0], wavelengh_limits[-1], resample_inc, dtype=float)
# obj_wave_obs = obj_wave_rest * (1.0 + self.obj_data['z_obj'])
# self.obj_data['obs_wave_rest'] = obj_wave_rest
# self.obj_data['obs_wave'] = obj_wave_obs
#
# # Get Halpha flux to calibrate
# idx_Halpha = (self.obj_data['lineLabels'] == 'H1r_6563A')
# self.obj_data['flux_halpha'] = self.obj_data['recomb_fluxes'][idx_Halpha] * obj_prop_df.loc['flux_hbeta'][0]
#
# # Reddening parameters for the nebular continuum
# self.obj_data['nebFlambda'] = self.gasExtincParams(obj_wave_rest, self.Rv_model, self.reddedning_curve_model)
#
# # Calculate the nebular continua
# self.obj_data['obs_flux'] = self.nebFluxCont(obj_wave_rest,
# obj_prop_df.loc['cHbeta'][0], self.obj_data['nebFlambda'],
# obj_prop_df.loc['T_low'][0],
# obj_prop_df.loc['He1_abund'][0], obj_prop_df.loc['He2_abund'][0],
# self.obj_data['flux_halpha'])
#
#
# # Save input conditions:
# self.obj_data['wavelengh_limits'] = wavelengh_limits
# self.obj_data['resample_inc'] = resample_inc
# self.obj_data['norm_interval'] = norm_interval
# self.obj_data['z_star'] = obj_prop_df.loc['z_star'][0]
# self.obj_data['Av_star'] = obj_prop_df.loc['Av_star'][0]
# self.obj_data['sigma_star'] = obj_prop_df.loc['sigma_star'][0]
# self.obj_data['flux_hbeta'] = obj_prop_df.loc['flux_hbeta'][0]
# self.obj_data['eqw_hbeta'] = obj_prop_df.loc['eqw_hbeta'][0]
#
#
# self.sspPrefit_Idcs = np.where(idx_populations)[0]
# self.sspPrefit_Coeffs = bases_coeff[idx_populations]
# self.sspPrefit_Limits = np.vstack((self.sspPrefit_Coeffs * 0.8, self.sspPrefit_Coeffs * 1.2)).T
#
# #Object data
# self.input_continuum = object_continuum
# self.input_continuum_er = obj_flux_err
# self.input_wave = obj_wave
# self.int_mask = obj_mask
#
# #Bases parameters
# neglected_populations = np.where(~idx_populations)
# self.onBasesWave = self.ssp_lib['wave_resam']
# self.onBasesFlux = np.delete(self.ssp_lib['flux_resam'], neglected_populations, axis=0)
# self.onBasesFluxNorm = np.delete(self.ssp_lib['flux_norm'], neglected_populations, axis=0)
# self.onBasesFluxNormCoeffs = np.delete(self.ssp_lib['normFlux_coeff'], neglected_populations, axis=0)
# self.range_bases = np.arange(self.onBasesFlux.shape[0])
#
# # Limit for bases
# z_max_ssp = (self.ssp_lib['wave_resam'][0] / obj_wave[0]) - 1.0
# z_min_ssp = (self.ssp_lib['wave_resam'][-1] / obj_wave[-1]) - 1.0
# self.zMin_SspLimit = z_min_ssp
# self.zMax_SspLimit = round(z_max_ssp - 0.001, 3)
# self.z_object = self.obj_data['z_star']
#
# # Store the data vector
# self.obj_data['obs_wave_rest'] = obj_wave_rest
# self.obj_data['obs_wave'] = obj_wave_obs
# self.obj_data['stellar_flux'] = obj_flux
# self.obj_data['stellar_flux_err'] = obj_flux + stellar_err
# self.obj_data['sigma_continuum'] = 0.02
#
#
#
# class Attributes_ImportModelData():
#
# def __init__(self):
#
# self.paths_dict = {}
#
# self.lines_df = read_excel(self.paths_dict['lines_data_file'], sheetname=0, header=0, index_col=0)
#
# # Save the data in the dictionary (Excluding Hbeta)
# idx_lines = (obj_lines_df.index.isin(input_lines)) & (obj_lines_df.index != 'H1_4861A')
# self.obj_data['lineIons'] = obj_lines_df.loc[idx_lines].ion.values
# self.obj_data['lineLabes'] = obj_lines_df.loc[idx_lines].index.values
# self.obj_data['lineWaves'] = obj_lines_df.loc[idx_lines].obs_wavelength.values
# self.obj_data['linePynebCode'] = obj_lines_df.loc[idx_lines].pynebCode.values
# self.obj_data['lineFluxes'] = obj_lines_df.loc[idx_lines].obs_flux.values
# self.obj_data['lineErr'] = obj_lines_df.loc[idx_lines].obs_fluxErr.values
#
# # Generate the dictionary with pyneb ions
# print('-- Loading atoms data with PyNeb')
# self.ionDict = pn.getAtomDict(atom_list=obj_lines_df.ion.values)
#
# print('-- Atomic sources Loaded ')
# # for atom in self.ionDict.keys():
# # textPrint = '--- {}: {}'.format(atom, self.ionDict[atom].printSources())
# # print(textPrint)
#
# # Establish index of lines which below to high and low ionization zones
# self.idx_highU = np.in1d(self.obj_data['lineIons'], self.high_temp_ions)
# self.idx_lowU = ~self.idx_highU
#
# # Attributes to increase calculation speed
# self.n_recombLines, self.n_colExcLines = np.sum(self.obj_data['lineType'] == 'rec'), np.sum(self.obj_data['lineType'] == 'col')
# self.range_recombLines, self.range_colExcLines = np.arange(self.n_recombLines), np.arange(self.n_colExcLines)
#
# # Empty Ionic dictionary for the MCMC
# self.abund_dict = {ion: 0 for ion in self.ionDict.keys()}
#
# # Save input conditions:
# self.obj_data['wavelengh_limits'] = wavelengh_limits
# self.obj_data['resample_inc'] = resample_inc
# self.obj_data['norm_interval'] = norm_interval
# self.obj_data['z_star'] = obj_prop_df.loc['z_star'][0]
#
#
# class Attributes_EmissionComponents():
#
# def __init__(self):
#
# #Hbeta configuration
# self.Hbeta_label = 'H1_4861A'
# self.Hbeta_wave = 4862.683
# self.Hbeta_pynebCode = '4_2'
#
# #Import Optical depth function
# posHelium_Lines = ['He1_3889A','He1_4026A','He1_4387A', 'He1_4471A', 'He1_4686A','He1_4714A','He1_4922A','He1_5876A','He1_6678A','He1_7065A','He1_7281A','He1_10830A']
# self.Coef_ftau_dict = self.import_optical_depth_coeff_table(posHelium_Lines)
#
# self.Hbeta_xX = rc_Gas.X(self.Hbeta_wave)
#
# self.obj_data['lineXx'] = rc_Gas.X(self.obj_data['lineWaves'])
# self.obj_data['lineFlambda'] = self.obj_data['lineXx'] / self.Hbeta_xX - 1.0
#
# # Emissivity grid
# self.emis_grid = self.manage_emissivity_grids(self.obj_data['linePynebCode'], self.obj_data['lineIons'], forceGridsReset)
#
# # Emissivity grid for collisional excited lines
# self.Hbeta_emis_grid = self.manage_emissivity_grids(np.array([4861]), np.array(['H1r']), forceGridsReset)
#
# # Normalizing emis_grid grids by Hbeta
# self.emis_grid = self.recomb_emis_grid / self.Hbeta_emis_grid
#
# # logarithmic scale log scales
# self.log_emis_grid = np.log10(self.emis_grid)
#
# # Dictionary to store the emissivity surface coeffients
# self.emisCoeffs = {}
#
# class EmissionEquations():
#
# def __init__(self):
|
Delosari/dazer
|
bin/lib/Astro_Libraries/old_versions/attributes_declaration.py
|
Python
|
mit
| 13,802
|
[
"Gaussian"
] |
e51e9486618f96742dc03fc2c15ec14e0bf2f65e581b582d9e6fa17aa659311b
|
'''
libChEBIpy (c) University of Manchester 2015
libChEBIpy is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
from ._base_object import BaseObject
class Comment(BaseObject):
'''Class representing a ChEBI comment.'''
def __init__(self, datatype_id, datatype, text, created_on):
self.__datatype_id = datatype_id
self.__datatype = datatype
self.__text = text
self.__created_on = created_on
BaseObject.__init__(self)
def get_datatype(self):
'''Returns datatype'''
return self.__datatype
def get_text(self):
'''Returns text'''
return self.__text
def get_created_on(self):
'''Returns created_on'''
return self.__created_on
def __get_datatype_id(self):
'''Returns datatype_id'''
return self.__datatype_id
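# Editor's sketch (hypothetical values, not from the ChEBI data model):
#
# comment = Comment(1, 'General', 'An example ChEBI comment.', '2015-01-01')
# assert comment.get_text() == 'An example ChEBI comment.'
# assert comment.get_datatype() == 'General'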
|
libChEBI/libChEBIpy
|
libchebipy/_comment.py
|
Python
|
mit
| 926
|
[
"VisIt"
] |
3c3bcc753bf017c801f928f394d3df69013160e229707a89f7fb1a492df69b84
|
from i3pystatus.core.util import internet, require
from i3pystatus.scores import ScoresBackend
import copy
import pytz
import time
from collections import namedtuple
from datetime import datetime
LIVE_URL = 'http://live.premierleague.com/#/gameweek/%s/matchday/%s/match/%s'
CONTEXT_URL = 'http://live.premierleague.com/syndicationdata/context.json'
SCOREBOARD_URL = 'http://live.premierleague.com/'
API_URL = 'http://live.premierleague.com/syndicationdata/competitionId=%s/seasonId=%s/gameWeekId=%s/scores.json'
STATS_URL = 'http://live.premierleague.com/syndicationdata/competitionId=%s/seasonId=%s/matchDayId=%s/league-table.json'
MATCH_DETAILS_URL = 'http://live.premierleague.com/syndicationdata/competitionId=%s/seasonId=%s/matchDayId=%s/matchId=%s/match-details.json'
MATCH_STATUS_PREGAME = 1
MATCH_STATUS_IN_PROGRESS = 2
MATCH_STATUS_FINAL = 3
MATCH_STATUS_HALFTIME = 4
class EPL(ScoresBackend):
'''
Backend to retrieve scores from the English Premier League. For usage
examples, see :py:mod:`here <.scores>`.
.. rubric:: Promotion / Relegation
Due to promotion/relegation, the **team_colors** configuration will
eventuall become out of date. When this happens, it will be necessary to
manually set the colors for the newly-promoted teams until the source for
this module is updated. An example of setting colors for newly promoted
teams can be seen below:
.. code-block:: python
from i3pystatus import Status
from i3pystatus.scores import epl
status = Status()
status.register(
'scores',
hints={'markup': 'pango'},
colorize_teams=True,
backends=[
epl.EPL(
teams=['LIV'],
team_colors={
'ABC': '#1D78CA',
'DEF': '#8AFEC3',
'GHI': '#33FA6D',
},
),
],
)
status.run()
.. rubric:: Available formatters
* `{home_name}` — Name of home team (e.g. **Tottenham Hotspur**)
* `{home_name_short}` — Shortened team name (e.g. **Spurs**)
* `{home_abbrev}` — 2 or 3-letter abbreviation for home team's city (e.g.
**TOT**)
* `{home_score}` — Home team's current score
* `{home_wins}` — Home team's number of wins
* `{home_losses}` — Home team's number of losses
* `{home_draws}` — Home team's number of draws
* `{home_points}` — Home team's number of standings points
* `{home_favorite}` — Displays the value for the :py:mod:`.scores` module's
``favorite`` attribute, if the home team is one of the teams being
followed. Otherwise, this formatter will be blank.
* `{away_name}` — Name of away team (e.g. **Manchester United**)
* `{away_name_short}` — Shortened team name (e.g. **Man Utd**)
* `{away_abbrev}` — 2 or 3-letter abbreviation for away team's name (e.g.
**MUN**)
* `{away_score}` — Away team's current score
* `{away_wins}` — Away team's number of wins
* `{away_losses}` — Away team's number of losses
* `{away_draws}` — Away team's number of draws
* `{away_points}` — Away team's number of standings points
* `{away_favorite}` — Displays the value for the :py:mod:`.scores` module's
``favorite`` attribute, if the away team is one of the teams being
followed. Otherwise, this formatter will be blank.
* `{minute}` — Current minute of game when in progress
* `{start_time}` — Start time of game in system's localtime (supports
strftime formatting, e.g. `{start_time:%I:%M %p}`)
.. rubric:: Team abbreviations
* **ARS** — Arsenal
* **AVL** — Aston Villa
* **BOU** — Bournemouth
* **CHE** — Chelsea
* **CRY** — Crystal Palace
* **EVE** — Everton
* **LEI** — Leicester City
* **LIV** — Liverpool
* **MCI** — Manchester City
* **MUN** — Manchester United
* **NEW** — Newcastle United
* **NOR** — Norwich City
* **SOU** — Southampton
* **STK** — Stoke City
* **SUN** — Sunderland Association
* **SWA** — Swansea City
* **TOT** — Tottenham Hotspur
* **WAT** — Watford
* **WBA** — West Bromwich Albion
* **WHU** — West Ham United
'''
interval = 300
settings = (
('favorite_teams', 'List of abbreviations of favorite teams. Games '
'for these teams will appear first in the scroll '
'list. A detailed description of how games are '
'ordered can be found '
':ref:`here <scores-game-order>`.'),
('all_games', 'If set to ``True``, all games will be present in '
'the scroll list. If set to ``False``, then only '
'games from **favorite_teams** will be present in '
'the scroll list.'),
('display_order', 'When **all_games** is set to ``True``, this '
'option will dictate the order in which games from '
'teams not in **favorite_teams** are displayed'),
('format_no_games', 'Format used when no tracked games are scheduled '
'for the current day (does not support formatter '
'placeholders)'),
('format_pregame', 'Format used when the game has not yet started'),
('format_in_progress', 'Format used when the game is in progress'),
('format_final', 'Format used when the game is complete'),
('team_colors', 'Dictionary mapping team abbreviations to hex color '
'codes. If overridden, the passed values will be '
'merged with the defaults, so it is not necessary to '
'define all teams if specifying this value.'),
('date', 'Date for which to display game scores, in **YYYY-MM-DD** '
'format. If unspecified, the date will be determined by '
'the return value of an API call to the **context_url**. '
'Due to API limitations, the date can presently only be '
'overridden to another date in the current week. This '
'option exists primarily for troubleshooting purposes.'),
('live_url', 'URL string to launch EPL Live Match Centre. This value '
'should not need to be changed.'),
('scoreboard_url', 'Link to the EPL scoreboard page. Like '
'**live_url**, this value should not need to be '
'changed.'),
('api_url', 'Alternate URL string from which to retrieve score data. '
'Like **live_url**, this value should not need to be '
'changed.'),
('stats_url', 'Alternate URL string from which to retrieve team '
'statistics. Like **live_url**, this value should not '
'need to be changed.'),
('match_details_url', 'Alternate URL string from which to retrieve '
'match details. Like **live_url**, this value '
'should not need to be changed.'),
)
required = ()
_default_colors = {
'ARS': '#ED1B22',
'AVL': '#94BEE5',
'BOU': '#CB0B0F',
'CHE': '#195FAF',
'CRY': '#195FAF',
'EVE': '#004F9E',
'LEI': '#304FB6',
'LIV': '#D72129',
'MCI': '#74B2E0',
'MUN': '#DD1921',
'NEW': '#06B3EB',
'NOR': '#00A651',
'SOU': '#DB1C26',
'STK': '#D81732',
'SUN': '#BC0007',
'SWA': '#B28250',
'TOT': '#DADADA',
'WAT': '#E4D500',
'WBA': '#B43C51',
'WHU': '#9DE4FA',
}
_valid_display_order = ['in_progress', 'final', 'pregame']
display_order = _valid_display_order
format_no_games = 'EPL: No games'
format_pregame = '[{scroll} ]EPL: [{away_favorite} ]{away_abbrev} ({away_points}, {away_wins}-{away_losses}-{away_draws}) at [{home_favorite} ]{home_abbrev} ({home_points}, {home_wins}-{home_losses}-{home_draws}) {start_time:%H:%M %Z}'
format_in_progress = '[{scroll} ]EPL: [{away_favorite} ]{away_abbrev} {away_score}[ ({away_power_play})], [{home_favorite} ]{home_abbrev} {home_score}[ ({home_power_play})] ({minute})'
format_final = '[{scroll} ]EPL: [{away_favorite} ]{away_abbrev} {away_score} ({away_points}, {away_wins}-{away_losses}-{away_draws}) at [{home_favorite} ]{home_abbrev} {home_score} ({home_points}, {home_wins}-{home_losses}-{home_draws}) (Final)'
team_colors = _default_colors
context_url = CONTEXT_URL
live_url = LIVE_URL
scoreboard_url = SCOREBOARD_URL
api_url = API_URL
stats_url = STATS_URL
match_details_url = MATCH_DETAILS_URL
def get_api_date(self):
# NOTE: We're not really using this date for EPL API calls, but we do
# need it to allow for a 'date' param to override which date we use for
# scores.
if self.date is not None and not isinstance(self.date, datetime):
try:
self.date = datetime.strptime(self.date, '%Y-%m-%d')
except (TypeError, ValueError):
self.logger.warning('Invalid date \'%s\'', self.date)
if self.date is None:
self.date = datetime.strptime(self.context.date, '%Y%m%d')
def get_context(self):
response = self.api_request(self.context_url)
if not response:
# There is no context data, but we still need a date to use in
# __init__.py to log that there are no games for the given date.
# Fall back to the parent class' function to set a date.
super(EPL, self).get_api_date()
return False
context_tuple = namedtuple(
'Context',
('competition', 'date', 'game_week', 'match_day', 'season')
)
self.context = context_tuple(
*[
response.get(x, '')
for x in ('competitionId', 'currentDay', 'gameWeekId',
'matchDayId', 'seasonId')
]
)
return True
def get_team_stats(self):
ret = {}
url = self.stats_url % (self.context.competition,
self.context.season,
self.context.match_day)
for item in self.api_request(url).get('Data', []):
try:
key = item.pop('TeamCode')
except KeyError:
self.logger.debug('Error occurred obtaining %s team stats',
self.__class__.__name__,
exc_info=True)
continue
ret[key] = item
return ret
def get_minute(self, data, id_):
match_status = data[id_].get('StatusId', MATCH_STATUS_PREGAME)
if match_status == MATCH_STATUS_HALFTIME:
return 'Halftime'
if match_status == MATCH_STATUS_IN_PROGRESS:
url = self.match_details_url % (self.context.competition,
self.context.season,
data[id_].get('MatchDayId', ''),
id_)
try:
response = self.api_request(url)
return '%s\'' % response['Data']['Minute']
except (KeyError, TypeError):
return '?\''
else:
return '?\''
def check_scores(self):
if not self.get_context():
data = team_game_map = {}
else:
self.get_api_date()
url = self.api_url % (self.context.competition,
self.context.season,
self.context.game_week)
for item in self.api_request(url).get('Data', []):
if item.get('Key', '') == self.date.strftime('%Y%m%d'):
game_list = item.get('Scores', [])
break
else:
game_list = []
self.logger.debug('game_list = %s', game_list)
team_stats = self.get_team_stats()
# Convert list of games to dictionary for easy reference later on
data = {}
team_game_map = {}
for game in game_list:
try:
id_ = game['Id']
except KeyError:
continue
try:
for key in ('HomeTeam', 'AwayTeam'):
team = game[key]['Code'].upper()
if team in self.favorite_teams:
team_game_map.setdefault(team, []).append(id_)
except KeyError:
continue
data[id_] = game
# Merge in the team stats, because they are not returned in the
# initial API request.
for key in ('HomeTeam', 'AwayTeam'):
team = game[key]['Code'].upper()
data[id_][key]['Stats'] = team_stats.get(team, {})
# Add the minute, if applicable
data[id_]['Minute'] = self.get_minute(data, id_)
self.interpret_api_return(data, team_game_map)
def process_game(self, game):
ret = {}
def _update(ret_key, game_key=None, callback=None, default='?'):
ret[ret_key] = self.get_nested(game,
game_key or ret_key,
callback=callback,
default=default)
self.logger.debug('Processing %s game data: %s',
self.__class__.__name__, game)
_update('id', 'Id')
_update('minute', 'Minute')
ret['live_url'] = self.live_url % (self.context.game_week,
self.context.match_day,
ret['id'])
status_map = {
MATCH_STATUS_PREGAME: 'pregame',
MATCH_STATUS_IN_PROGRESS: 'in_progress',
MATCH_STATUS_FINAL: 'final',
MATCH_STATUS_HALFTIME: 'in_progress',
}
status_code = game.get('StatusId')
if status_code is None:
self.logger.debug('%s game %s is missing StatusId',
self.__class__.__name__, ret['id'])
status_code = 1
ret['status'] = status_map[status_code]
for ret_key, game_key in (('home', 'HomeTeam'), ('away', 'AwayTeam')):
_update('%s_score' % ret_key, '%s:Score' % game_key, default=0)
_update('%s_name' % ret_key, '%s:Name' % game_key)
_update('%s_name_short' % ret_key, '%s:ShortName' % game_key)
_update('%s_abbrev' % ret_key, '%s:Code' % game_key)
_update('%s_wins' % ret_key, '%s:Stats:Won' % game_key, default=0)
_update('%s_losses' % ret_key, '%s:Stats:Lost' % game_key)
_update('%s_draws' % ret_key, '%s:Stats:Drawn' % game_key)
_update('%s_points' % ret_key, '%s:Stats:Points' % game_key)
try:
game_time = datetime.strptime(
game.get('DateTime', ''),
'%Y-%m-%dT%H:%M:%S'
)
except ValueError as exc:
# Log when the date retrieved from the API return doesn't match the
# expected format (to help troubleshoot API changes), and set an
# actual datetime so format strings work as expected. The times
# will all be wrong, but the logging here will help us make the
# necessary changes to adapt to any API changes.
self.logger.error(
'Error encountered determining game time for %s game %s:',
self.__class__.__name__,
ret['id'],
exc_info=True
)
game_time = datetime(1970, 1, 1)
london = pytz.timezone('Europe/London')
ret['start_time'] = london.localize(game_time).astimezone()
self.logger.debug('Returned %s formatter data: %s',
self.__class__.__name__, ret)
return ret
|
yang-ling/i3pystatus
|
i3pystatus/scores/epl.py
|
Python
|
mit
| 16,454
|
[
"CRYSTAL"
] |
39107c120af190d6cc9ead426bce98f587997dacb92165d47731a3760228729d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ampOpInversor.py
#
# Copyright 2015 Roberto Tavares <roberto.tavares.filho@gmail.com>
#
# Version: 0.1, 11/11/2015
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# input = Avco voltage gain
# Rin minimum input resistance
# RL load resistance
# --------- Op amp data ----------------------------------------
# Vio input offset voltage
# dVio input offset voltage drift with temperature
# Iio op amp input offset current
# dIo offset current drift
# Rid differential input resistance
# Avo open-loop voltage gain
# fop frequency of the op amp's first pole
# Ro amplifier output resistance
# Vn equivalent input noise voltage of the op amp
# In input noise current of the op amp
# -------------- Resistor sensitivity to temperature --------
# dRi change of resistance Ri with temperature (ppm / degree centigrade)
# dRf change of resistance Rf with temperature (ppm / degrees centigrade)
# -------------------- Circuit operating temperature range ----------------------
# tmin minimum temperature, degrees centigrade
# tmax maximum temperature, degrees centigrade
import valoresPadroes
import argparse
import gettext
import sys
import os
import wx
import socket
from subprocess import call
import math
import cmath
import time
import traceback
from math import pi, sin, cos, log, sqrt, atan2
# imports for internationalization
import gettext
import __builtin__
__builtin__.__dict__['_'] = wx.GetTranslation
# creation of the object identifier IDs
TXT_AVCOO = wx.NewId()
TXT_RIN = wx.NewId()
TXT_RL = wx.NewId()
TXT_VIO = wx.NewId()
TXT_DVIO = wx.NewId()
TXT_IIO = wx.NewId()
TXT_DIIO = wx.NewId()
TXT_RID = wx.NewId()
TXT_AVO = wx.NewId()
TXT_FOP = wx.NewId()
TXT_RO = wx.NewId()
TXT_VN = wx.NewId()
TXT_IN = wx.NewId()
TXT_DRI = wx.NewId()
TXT_DRF = wx.NewId()
TXT_TMIN = wx.NewId()
TXT_TMAX = wx.NewId()
class entradaInversorAmpOp(wx.Dialog):
"""
Implements the input screen for the op-amp inverter design parameters
"""
def __init__(self):
wx.Dialog.__init__(self, None, -1, _('Inverter Amplifier Specifications'),size=(300, 150)) # dialog initialization
self.sizer1 = wx.BoxSizer(wx.VERTICAL)
self.sizer10 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer11 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer12 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer13 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer14 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer15 = wx.BoxSizer(wx.HORIZONTAL);
self.staticTxtTitulo= wx.StaticText(self, -1, _("Input Data"))
self.staticTxtTitulo.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer1.Add(self.staticTxtTitulo, flag= wx.ALIGN_CENTER_HORIZONTAL) # add the contents to the horizontal sizers
self.static1= wx.StaticText(self, -1, "Avco:") # static text object
self.static1.SetToolTipString(_("Circuit closed-loop gain"))
self.static1.SetForegroundColour(wx.BLUE)
self.avcoTxt = wx.TextCtrl(self, TXT_AVCOO,'100',size=(40,20), style=wx.TE_CENTRE)
self.sizer10.Add(self.static1,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
self.sizer10.Add(self.avcoTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
self.static2= wx.StaticText(self, -1, "Ri min:") # static text object
self.static2.SetToolTipString(_("Minimum input resistance, ohms"))
self.static2.SetForegroundColour(wx.BLUE)
self.riminTxt = wx.TextCtrl(self, TXT_RIN,'1000',size=(60,20), style=wx.TE_CENTRE)
self.sizer10.Add(self.static2,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
self.sizer10.Add(self.riminTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
self.static3= wx.StaticText(self, -1, "RL:") # static text object
self.static3.SetToolTipString(_("Minimum load resistance, ohms"))
self.static3.SetForegroundColour(wx.BLUE)
self.rlTxt = wx.TextCtrl(self, TXT_RL,'2000',size=(60,20), style=wx.TE_CENTRE)
self.sizer10.Add(self.static3,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
self.sizer10.Add(self.rlTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
#self.static4= wx.StaticText(self, -1, "Vio:") # static text object
#self.static4.SetToolTipString(_("Input offset voltage (millivolts)"))
#self.vioTxt = wx.TextCtrl(self, TXT_VIO,'0.8',size=(40,20), style=wx.TE_CENTRE)
#self.sizer11.Add(self.static4,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
#self.sizer11.Add(self.vioTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
#self.static5= wx.StaticText(self, -1, "Delta Vio:") # static text object
#self.static5.SetToolTipString(_("Output change due to input offset change (microvolts/degree centigrade)"))
#self.dvioTxt = wx.TextCtrl(self, TXT_DVIO,'15',size=(40,20), style=wx.TE_CENTRE)
#self.sizer11.Add(self.static5,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
#self.sizer11.Add(self.dvioTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
#self.static6= wx.StaticText(self, -1, "Iio:") # static text object
#self.static6.SetToolTipString(_("Input offset current (nA)"))
#self.iioTxt = wx.TextCtrl(self, TXT_IIO,'3',size=(40,20), style=wx.TE_CENTRE)
#self.sizer11.Add(self.static6,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
#self.sizer11.Add(self.iioTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
#self.static7= wx.StaticText(self, -1, "Delta Iio:") # static text object
#self.static7.SetToolTipString(_("Output change due to input current offset change (nA/degree centigrade)"))
#self.diioTxt = wx.TextCtrl(self, TXT_DIIO,'0.5',size=(40,20), style=wx.TE_CENTRE)
#self.sizer11.Add(self.static7,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
#self.sizer11.Add(self.diioTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
self.static8= wx.StaticText(self, -1, "Rid:") # static text object
self.static8.SetToolTipString(_("Differential input resistance (megaohms)"))
self.ridTxt = wx.TextCtrl(self, TXT_RID,'6',size=(40,20), style=wx.TE_CENTRE)
self.sizer11.Add(self.static8,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
self.sizer11.Add(self.ridTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
self.static9= wx.StaticText(self, -1, "Avo:") # static text object
self.static9.SetToolTipString(_("Operational amplifier open-loop gain"))
self.avoTxt = wx.TextCtrl(self, TXT_AVO,'50000',size=(80,20), style=wx.TE_CENTRE)
self.sizer11.Add(self.static9,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
self.sizer11.Add(self.avoTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
self.static10= wx.StaticText(self, -1, "fop:") # static text object
self.static10.SetToolTipString(_("First pole frequency of the op amp (Hz)"))
self.fopTxt = wx.TextCtrl(self, TXT_FOP,'8',size=(40,20), style=wx.TE_CENTRE)
self.sizer12.Add(self.static10,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
self.sizer12.Add(self.fopTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
self.static11= wx.StaticText(self, -1, "Ro:") # static text object
self.static11.SetToolTipString(_("Output resistance of the op amp (ohms)"))
self.roTxt = wx.TextCtrl(self, TXT_RO,'70',size=(40,20), style=wx.TE_CENTRE)
self.sizer12.Add(self.static11,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
self.sizer12.Add(self.roTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
#self.static12= wx.StaticText(self, -1, "Vn:") # static text object
#self.static12.SetToolTipString(_("Equivalent input noise voltage (femtoV 2 / Hz)"))
#self.vnTxt = wx.TextCtrl(self, TXT_VN,'5',size=(40,20), style=wx.TE_CENTRE)
#self.sizer12.Add(self.static12,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
#self.sizer12.Add(self.vnTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
#self.static13= wx.StaticText(self, -1, "In:") # static text object
#self.static13.SetToolTipString(_("Equivalent input noise current (atto I 2 / Hz)"))
#self.inTxt = wx.TextCtrl(self, TXT_IN,'0.00005',size=(80,20), style=wx.TE_CENTRE)
#self.sizer12.Add(self.static13,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
#self.sizer12.Add(self.inTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
#self.static14= wx.StaticText(self, -1, "Delta Ri:") # static text object
#self.static14.SetToolTipString(_("Change in circuit input resistance in ppm/degree centigrade)"))
#self.static14.SetForegroundColour(wx.RED)
#self.driTxt = wx.TextCtrl(self, TXT_DRI,'100',size=(80,20), style=wx.TE_CENTRE)
#self.sizer13.Add(self.static14,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
#self.sizer13.Add(self.driTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
#self.static15= wx.StaticText(self, -1, "Delta Rf:") # static text object
#self.static15.SetToolTipString(_("Change in circuit feedback resistance in ppm/degree centigrade)"))
#self.static15.SetForegroundColour(wx.RED)
#self.drfTxt = wx.TextCtrl(self, TXT_DRF,'100',size=(80,20), style=wx.TE_CENTRE)
#self.sizer13.Add(self.static15,flag= wx.RIGHT , border = 2) # add the contents to the horizontal sizers
#self.sizer13.Add(self.drfTxt,flag= wx.RIGHT , border = 10) # add the contents to the horizontal sizers
self.rb1 = wx.RadioButton(self, -1, _('Resistors 1% tol'), style=wx.RB_GROUP)
self.rb2 = wx.RadioButton(self, -1, _('Resistors 5% tol') )
self.sizer14.Add(self.rb1,flag= wx.RIGHT , border = 10)
self.sizer14.Add(self.rb2,flag= wx.RIGHT , border = 10)
self.okButton= wx.Button(self, wx.ID_OK, "OK") # OK button object
self.cancelButton = wx.Button(self, wx.ID_CANCEL, "Cancel") # Cancel button object
self.sizer15.Add(self.okButton,flag= wx.EXPAND | wx.ALL, border = 5)
self.sizer15.Add(self.cancelButton,flag= wx.EXPAND | wx.ALL, border = 5)
self.sizer1.Add(self.sizer10,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer11,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.sizer1.Add(self.sizer12,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer13,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer14,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer15,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.SetSizer(self.sizer1)
self.Fit()
def GetAvcoTxt(self):
return self.avcoTxt.GetValue()
def GetRidTxt(self):
return self.ridTxt.GetValue()
def GetRoTxt(self):
return self.roTxt.GetValue()
def GetRiminTxt(self):
return self.riminTxt.GetValue()
def GetFopTxt(self):
return self.fopTxt.GetValue()
def GetAvoTxt(self):
return self.avoTxt.GetValue()
#def GetVnTxt(self):
# return self.vnTxt.GetValue()
#def GetInTxt(self):
# return self.inTxt.GetValue()
def GetRLTxt(self):
return self.rlTxt.GetValue()
def GetTol(self):
if self.rb1.GetValue() == True:
return 1
else:
return 5
class saidaAmpOpInverter(wx.Dialog):
def __init__(self,Ri,Rf,RL,Rp,fcp,tr):
wx.Dialog.__init__(self, None, -1, _('Circuit values'),size=(300, 150)) # dialog initialization
self.sizer1 = wx.BoxSizer(wx.VERTICAL)
self.sizer10 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer11 = wx.BoxSizer(wx.VERTICAL);
self.sizer12 = wx.BoxSizer(wx.VERTICAL);
self.sizer13 = wx.BoxSizer(wx.VERTICAL);
self.sizer15 = wx.BoxSizer(wx.VERTICAL);
self.staticTxtTitulo= wx.StaticText(self, -1, _("AmpOp inverter design output data"))
self.staticTxtTitulo.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer1.Add(self.staticTxtTitulo, flag= wx.ALIGN_CENTER_HORIZONTAL)
self.texto= "RI= " + str(Ri) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Circuit input resistor"))
self.sizer11.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "RF= " + str(Rf) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Circuit feedback resistor"))
self.sizer11.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "RL= " + str(RL) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Circuit load resistence"))
self.sizer11.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "RP= " + str(Rp) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Circuit offset resistor"))
self.sizer11.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
#self.texto= "Minimum Avco= " + str(Avco_min)
#self.static1= wx.StaticText(self, -1, self.texto)
#self.static1.SetToolTipString(_("Minimum gain due to resistor tolerance"))
#self.sizer11.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
#self.texto= "Maximum Avco= " + str(Avco_max)
#self.static1= wx.StaticText(self, -1, self.texto)
#self.static1.SetToolTipString(_("Maximum gain due to resistor tolerance"))
#self.sizer11.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "fcp= " + str(fcp) + " Hz"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Small signal bandwidth"))
self.sizer11.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "tr= " + str(tr) + " microseconds"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Small signal risetime"))
self.sizer11.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.okButton= wx.Button(self, wx.ID_OK, _("CLOSE")) #objeto botao OK
self.sizer15.Add(self.okButton,flag= wx.EXPAND | wx.ALL, border = 5)
# insert the figure with the associated schematic
self.img1 = wx.Image("ampOpInversor.png", wx.BITMAP_TYPE_ANY)
self.bitmap1 = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.img1))
self.sizer12.Add(self.bitmap1,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.static13= wx.StaticText(self, -1, _("For theorical explanation, please visit http://www.cadernodelaboratorio.com.br")) #objeto texto statico
self.sizer13.Add(self.static13,flag= wx.RIGHT , border = 2)
# assemble the sizer structure
self.sizer10.Add(self.sizer11,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer10.Add(self.sizer12,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer10,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer13,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer15,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.SetSizer(self.sizer1) # set the top-level sizer on the area occupied by the dialog
self.Fit()
class ampOpInverterAmplifierDesign():
def __init__(self):
self.dialogo = entradaInversorAmpOp() # create a dialog requesting the design input parameters
self.result = self.dialogo.ShowModal()
if self.result == wx.ID_OK:
self.result2= self.calculaCircuito()
self.tol= self.dialogo.GetTol()
self.RiStd= valoresPadroes.retornaValorResistor(self.Ri,self.tol,0)
self.RfStd= valoresPadroes.retornaValorResistor(self.Rf,self.tol,0)
self.RpStd= valoresPadroes.retornaValorResistor(self.Rp,self.tol,0)
self.dialogo2= saidaAmpOpInverter(self.RiStd,self.RfStd,self.RL,self.RpStd,self.fcp, self.tr*1000000)
self.result3= self.dialogo2.ShowModal()
self.dialogo2.Destroy()
self.dialogo.Destroy()
def calculaCircuito(self):
self.avco = -float(self.dialogo.GetAvcoTxt())
self.Rid= 1000000.0 * float(self.dialogo.GetRidTxt())
self.Ro= float(self.dialogo.GetRoTxt())
self.Rimin= float(self.dialogo.GetRiminTxt())
self.fop= float(self.dialogo.GetFopTxt())
self.avo= float(self.dialogo.GetAvoTxt())
#self.vn = float(self.dialogo.GetVnTxt())
#self.inoise = float(self.dialogo.GetInTxt())
self.RL= float(self.dialogo.GetRLTxt())
# computation of the optimum value of Rf
self.beta= 1.0 / (1.0 -self.avco)
# compute the optimum Rf value
self.Rf= sqrt((self.Rid * self.Ro)/(2*self.beta))
# compute the value of Ri
self.Ri= - self.Rf/self.avco
while self.Ri< self.Rimin:
self.Rf = 1.1 * self.Rf
self.Ri = -self.Rf / self.avco
# computation of Rp
self.Rp= (self.Ri * self.Rf)/(self.Ri + self.Rf)
# computation of the bandwidth
self.fcp= self.fop * self.avo * self.Ri/self.Rf
# computation of tr
self.tr = 0.35 * self.Rf/(self.fop*self.avo*self.Ri)
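# Worked example (editor's note, using the dialog defaults above): with
# Avco = 100, fop = 8 Hz and Avo = 50000, Ri/Rf = 1/|Avco|, so
# fcp = 8 * 50000 / 100 = 4000 Hz and tr = 0.35/4000 s = 87.5 microseconds.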
return True
|
cadele/cadele
|
ampOpInversor.py
|
Python
|
gpl-2.0
| 18,584
|
[
"VisIt"
] |
af4b486b45e47feb6844c37330cd97561fd0fb48cefd19e463d03e9175946d02
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
One repository to update them all
On mbed.org the mbed SDK is split up in multiple repositories, this script takes
care of updating them all.
"""
import sys
from copy import copy
from os import walk, remove, makedirs, getcwd, rmdir, listdir
from os.path import join, abspath, dirname, relpath, exists, isfile, normpath, isdir
from shutil import copyfile
from optparse import OptionParser
import re
import string
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.settings import MBED_ORG_PATH, MBED_ORG_USER, BUILD_DIR
from tools.paths import *
from tools.utils import run_cmd
MBED_URL = "mbed.org"
MBED_USER = "mbed_official"
changed = []
push_remote = True
quiet = False
commit_msg = ''
# Code that does have a mirror in the mbed SDK
# Tuple data: (repo_name, list_of_code_dirs, [team])
# team is optional - if not specified, the code is published under mbed_official
OFFICIAL_CODE = {"mbed-dev" : ["cmsis", "drivers", "hal", "platform", "targets", "mbed.h"]}
# A list of regular expressions that will be checked against each directory
# name and skipped if they match.
IGNORE_DIRS = (
)
IGNORE_FILES = (
'COPYING',
'\.md',
"\.lib",
"\.bld"
)
def ignore_path(name, reg_exps):
for r in reg_exps:
if re.search(r, name):
return True
return False
class MbedRepository:
@staticmethod
def run_and_print(command, cwd):
stdout, _, _ = run_cmd(command, work_dir=cwd, redirect=True)
print(stdout)
def __init__(self, name):
self.name = name
self.path = join(MBED_ORG_PATH, name)
self.url = "http://" + MBED_URL + "/users/" + MBED_ORG_USER + "/code/%s/"
if not exists(self.path):
# Checkout code
if not exists(MBED_ORG_PATH):
makedirs(MBED_ORG_PATH)
self.run_and_print(['hg', 'clone', self.url % name], cwd=MBED_ORG_PATH)
else:
# Update
self.run_and_print(['hg', 'pull'], cwd=self.path)
self.run_and_print(['hg', 'update'], cwd=self.path)
def publish(self):
# The maintainer has to evaluate the changes first and explicitly accept them
self.run_and_print(['hg', 'addremove'], cwd=self.path)
stdout, _, _ = run_cmd(['hg', 'status'], work_dir=self.path)
if stdout == '':
print "No changes"
return False
print stdout
if quiet:
commit = 'Y'
else:
commit = raw_input(push_remote and "Do you want to commit and push? Y/N: " or "Do you want to commit? Y/N: ")
if commit == 'Y':
args = ['hg', 'commit', '-u', MBED_ORG_USER]
# NOTE commit_msg should always come from the relevant mbed 2 release text
if commit_msg:
args = args + ['-m', commit_msg]
self.run_and_print(args, cwd=self.path)
if push_remote:
self.run_and_print(['hg', 'push'], cwd=self.path)
return True
# Check if a file is a text file or a binary file
# Taken from http://code.activestate.com/recipes/173220/
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
def is_text_file(filename):
block_size = 1024
def istext(s):
if "\0" in s:
return 0
if not s: # Empty files are considered text
return 1
# Get the non-text characters (maps a character to itself then
# use the 'remove' option to get rid of the text characters.)
t = s.translate(_null_trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
if float(len(t))/len(s) > 0.30:
return 0
return 1
with open(filename) as f:
res = istext(f.read(block_size))
return res
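# Editor's note (assumed example): a Python source file such as this script
# yields is_text_file(path) == 1, while a compiled binary typically contains
# null bytes or more than 30% non-text characters and yields 0.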
# Return the line ending type for the given file ('cr' or 'crlf')
def get_line_endings(f):
examine_size = 1024
try:
tf = open(f, "rb")
lines, ncrlf = tf.readlines(examine_size), 0
tf.close()
for l in lines:
if l.endswith("\r\n"):
ncrlf = ncrlf + 1
return 'crlf' if ncrlf > len(lines) >> 1 else 'cr'
except:
return 'cr'
# Copy file to destination, but preserve destination line endings if possible
# This prevents very annoying issues with huge diffs that appear because of
# differences in line endings
def copy_with_line_endings(sdk_file, repo_file):
if not isfile(repo_file):
copyfile(sdk_file, repo_file)
return
is_text = is_text_file(repo_file)
if is_text:
sdk_le = get_line_endings(sdk_file)
repo_le = get_line_endings(repo_file)
if not is_text or sdk_le == repo_le:
copyfile(sdk_file, repo_file)
else:
print "Converting line endings in '%s' to '%s'" % (abspath(repo_file), repo_le)
f = open(sdk_file, "rb")
data = f.read()
f.close()
f = open(repo_file, "wb")
data = data.replace("\r\n", "\n") if repo_le == 'cr' else data.replace('\n','\r\n')
f.write(data)
f.close()
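# Editor's note: e.g. if the repository copy uses CRLF endings while the
# incoming SDK file uses LF, the data is converted to CRLF before writing,
# so the resulting Mercurial diff shows only real content changes.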
def visit_files(path, visit):
for root, dirs, files in walk(path):
# Ignore hidden directories
for d in copy(dirs):
full = join(root, d)
if d.startswith('.'):
dirs.remove(d)
if ignore_path(full, IGNORE_DIRS):
print "Skipping '%s'" % full
dirs.remove(d)
for file in files:
if ignore_path(file, IGNORE_FILES):
continue
visit(join(root, file))
def visit_dirs(path, visit):
for root, dirs, files in walk(path, topdown=False):
for d in dirs:
full = join(root, d)
# We don't want to remove the .hg directory
if not '.hg' in full:
visit(full)
def update_repo(repo_name, sdk_paths, lib=False):
repo = MbedRepository(repo_name)
# copy files from mbed SDK to mbed_official repository
def visit_mbed_sdk(sdk_file):
# Source files structure is different for the compiled binary lib
# compared to the mbed-dev sources
if lib:
repo_file = join(repo.path, relpath(sdk_file, sdk_path))
else:
repo_file = join(repo.path, sdk_file)
repo_dir = dirname(repo_file)
if not exists(repo_dir):
print("CREATING: %s" % repo_dir)
makedirs(repo_dir)
copy_with_line_endings(sdk_file, repo_file)
# Go through each path specified in the mbed structure
for sdk_path in sdk_paths:
if isfile(sdk_path):
# Single file so just copy directly across
visit_mbed_sdk(sdk_path)
else:
visit_files(sdk_path, visit_mbed_sdk)
def sdk_remove(repo_path):
print("REMOVING: %s" % repo_path)
# Check if this is an empty directory or a file before determining how to
# delete it. As this function should only be called with a directory list
# after being called with a file list, the directory should automatically
# be either valid or empty.
if isfile(repo_path):
remove(repo_path)
elif isdir(repo_path) and not listdir(repo_path):
rmdir(repo_path)
else:
print("ERROR: %s is not empty, please remove manually." % repo_path)
print listdir(repo_path)
exit(1)
# remove repository files that do not exist in the mbed SDK
def visit_lib_repo(repo_path):
for sdk_path in sdk_paths:
sdk_file = join(sdk_path, relpath(repo_path, repo.path))
if not exists(sdk_file):
sdk_remove(repo_path)
# remove repository files that do not exist in the mbed SDK source
def visit_repo(repo_path):
# work out equivalent sdk path from repo file
sdk_path = join(getcwd(), relpath(repo_path, repo.path))
if not exists(sdk_path):
sdk_remove(repo_path)
# Go through each path specified in the mbed structure
# Check if there are any files in any of those paths that are no longer part of the SDK
if lib:
visit_files(repo.path, visit_lib_repo)
# Now do the same for directories that may need to be removed. This needs to be done
# bottom up to ensure any lower nested directories can be deleted first
visit_dirs(repo.path, visit_lib_repo)
else:
visit_files(repo.path, visit_repo)
# Now do the same for directories that may need to be removed. This needs to be done
# bottom up to ensure any lower nested directories can be deleted first
visit_dirs(repo.path, visit_repo)
if repo.publish():
changed.append(repo_name)
def update_code(repositories):
for repo_name in repositories.keys():
sdk_dirs = repositories[repo_name]
print '\n=== Updating "%s" ===' % repo_name
update_repo(repo_name, sdk_dirs)
def update_mbed():
update_repo("mbed", [join(BUILD_DIR, "mbed")], lib=True)
def do_sync(options):
global push_remote, quiet, commit_msg, changed
push_remote = not options.nopush
quiet = options.quiet
commit_msg = options.msg
changed = []
if options.code:
update_code(OFFICIAL_CODE)
if options.mbed:
update_mbed()
if changed:
print "Repositories with changes:", changed
return changed
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--code",
action="store_true", default=False,
help="Update the mbed_official code")
parser.add_option("-m", "--mbed",
action="store_true", default=False,
help="Release a build of the mbed library")
parser.add_option("-n", "--nopush",
action="store_true", default=False,
help="Commit the changes locally only, don't push them")
parser.add_option("", "--commit_message",
action="store", type="string", default='', dest='msg',
help="Commit message to use for all the commits")
parser.add_option("-q", "--quiet",
action="store_true", default=False,
help="Don't ask for confirmation before commiting or pushing")
(options, args) = parser.parse_args()
do_sync(options)
|
arostm/mbed-os
|
tools/synch.py
|
Python
|
apache-2.0
| 11,095
|
[
"VisIt"
] |
4a070fcce60f10f45bf653f397b04e5ec4c113fdfb94e6c893b9971bd47a517c
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
#import glob
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import numpy as np
import tkinter as tk
from tkinter.filedialog import askopenfilename
from tkinter import ttk
import h5py
import f90nml
import collections
#from scipy.integrate import simps
import scipy.integrate as integrate
#import fidasim as fs
import scipy.interpolate as interpolate
#import numpy as np
from scipy.spatial.distance import cdist
"""
Todo
----
* use http://nbviewer.jupyter.org/gist/tillahoffmann/f844bce2ec264c1c8cb5 for binning neutral density and spec (project_image)
* change images to use angles instead of distance (ie independent of projection_dist)
* add beam centerline to imaging contour plots
* cannot edit wavelengths until after changing channel and replotting. Why? Fix this.
* fix bad plots of neutrals in mach coords by using kde
* clean plots when all data turned off
* with smart h5 reader, could load only info needed to make gui first, then only get data when called, and then save for later use
* take units from files, don't hardcode. Low priority future-proofing
* in taking mean of beam densities, should it only be for non-zero elements? As grid vol --> inf, density --> 0 otherwise
* optimize: can more stuff be loaded only when used? can more stuff be saved and not recalculated (ie set/get)?
* option to change volume element in neutral plotting for better fidelity in going from beam to mach coords
* get more intelligent h5 reader to just grab what's needed
* NPA needs work
* currently seems to load neutrals twice. check this and fix
* separate beam from project_image(). get beam angle coords in plot function itself
* add tab for plotting beam grid, los, beam centerline
"""
"""Taken from http://nbviewer.jupyter.org/gist/tillahoffmann/f844bce2ec264c1c8cb5 or
https://gist.github.com/tillahoffmann/f844bce2ec264c1c8cb5
"""
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, shape (n, ), optional, default: None
An array of weights, of the same shape as `x`. Each value in `x`
only contributes its associated weight towards the bin count
(instead of 1).
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : float
Effective sample size using Kish's approximation.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
kde.pdf(points) : ndarray
Alias for ``kde.evaluate(points)``.
kde.set_bandwidth(bw_method='scott') : None
Computes the bandwidth, i.e. the coefficient that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
.. versionadded:: 0.11.0
kde.covariance_factor : float
Computes the coefficient (`kde.factor`) that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
The default is `scotts_factor`. A subclass can overwrite this method
to provide a different method, or set it through a call to
`kde.set_bandwidth`.
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
>>> "Measurement model, return two coupled measurements."
>>> m1 = np.random.normal(size=n)
>>> m2 = np.random.normal(scale=0.5, size=n)
>>> return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = np.atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self.weights = weights / np.sum(weights)
else:
self.weights = np.ones(self.n) / self.n
# Compute the effective sample size
# http://surveyanalysis.org/wiki/Design_Effects_and_Effective_Sample_Size#Kish.27s_approximate_formula_for_computing_effective_sample_size
self.neff = 1.0 / np.sum(self.weights ** 2)
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = np.atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = np.reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
# compute the normalised residuals
chi2 = cdist(points.T, self.dataset.T, 'mahalanobis', VI=self.inv_cov) ** 2
# compute the pdf
result = np.sum(np.exp(-.5 * chi2) * self.weights, axis=1) / self._norm_factor
return result
__call__ = evaluate
def scotts_factor(self):
return np.power(self.neff, -1./(self.d+4))
def silverman_factor(self):
return np.power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
        elif np.isscalar(bw_method) and not isinstance(bw_method, str):  # string_types was never imported; use the builtin
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
# Compute the mean and residuals
_mean = np.sum(self.weights * self.dataset, axis=1)
_residual = (self.dataset - _mean[:, None])
# Compute the biased covariance
self._data_covariance = np.atleast_2d(np.dot(_residual * self.weights, _residual.T))
# Correct for bias (http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_covariance)
self._data_covariance /= (1 - np.sum(self.weights ** 2))
self._data_inv_cov = np.linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) #* self.n
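# Minimal usage sketch for the weighted KDE above (illustrative values, not
# part of this file's API): the weights let heavily-weighted samples
# contribute proportionally more to the density estimate.
#
#     samples = np.random.normal(size=(1, 500))      # (ndim, ndata)
#     wts = np.random.uniform(size=500)              # one weight per sample
#     kde = gaussian_kde(samples, weights=wts)
#     density = kde(np.linspace(-3., 3., 101))       # evaluate the pdf on a grid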
def to_angle_space(a, xhat, yhat, zhat):
"""Convert from distance-space to angle-space.
Let the normalized, reference vector be :math:`\\hat{z}`, the common position to be point :math:`\\vec{O}`, and a point in
question be :math:`\\vec{P}`. Let :math:`\\vec{a}=\\vec{P}-\\vec{O}`. Then the two angles giving the deviation of
:math:`\\vec{P}` from :math:`\\hat{z}` are given by:
.. math:: \\theta_1 = sign(\\vec{a} \\cdot \\hat{x}) \\cos^{-1} \\left(\\frac{(\\vec{a} - [\\vec{a} \\cdot \\hat{y}] \
\\hat{y}) \\cdot \\hat{z}}{||(\\vec{a} - [\\vec{a} \\cdot \\hat{y}]\\hat{y}||}\\right)
.. math:: \\theta_2 = sign(\\vec{a} \\cdot \\hat{y}) \\cos^{-1} \\left( \\frac{(\\vec{a} - [\\vec{a} \\cdot \\hat{x}] \
\\hat{x}) \\cdot\\hat{z}}{||(\\vec{a} - [\\vec{a} \\cdot \\hat{x}]\\hat{x}||} \\right)
Parameters
----------
a : array, (nchan, 3)
Vectors to be converted to angles. Emanating from single location ('lens')
xhat : array, (3)
Unit vector perp to yhat and zhat
yhat : array, (3)
Unit vector perp to xhat and zhat
zhat : array, (3)
Unit vector defining zero-angle position. Points from lens to some other point.
Returns
-------
angles : array (nchan, 2)
Angle that 'a' deviates from zhat in the xhat direction and in the yhat direction
"""
nchan = a.shape[0]
# Find angle in xhat, zhat plane
a_dot_xhat = np.dot(a, xhat)
a_dot_yhat = np.dot(a, yhat)
a_dot_yhat_yhat = a_dot_yhat.reshape(nchan, 1) * yhat
a_minus_a_dot_yhat_yhat = a - a_dot_yhat_yhat
a_minus_a_dot_yhat_yhat_mag = np.linalg.norm(a_minus_a_dot_yhat_yhat, axis=1)
arg1 = np.dot(a_minus_a_dot_yhat_yhat, zhat) / a_minus_a_dot_yhat_yhat_mag
    arg1 = np.clip(arg1, -1., 1.)  # clamp both ends of arccos's domain against floating point error
ang1 = np.sign(a_dot_xhat) * np.arccos(arg1) # (nchan)
# Find angle in yhat, zhat plane
a_dot_xhat = np.sum(a * xhat.reshape(1, 3), axis=1)
a_dot_xhat_xhat = a_dot_xhat.reshape(nchan, 1) * xhat
a_minus_a_dot_xhat_xhat = a - a_dot_xhat_xhat
a_minus_a_dot_xhat_xhat_mag = np.linalg.norm(a_minus_a_dot_xhat_xhat, axis=1)
arg2 = np.dot(a_minus_a_dot_xhat_xhat, zhat) / a_minus_a_dot_xhat_xhat_mag
    arg2 = np.clip(arg2, -1., 1.)  # clamp both ends of arccos's domain against floating point error
ang2 = np.sign(a_dot_yhat) * np.arccos(arg2) # (nchan)
return np.array([ang1, ang2]).T # (nchan, 2)
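# Quick sanity check for to_angle_space (hypothetical inputs): a vector along
# zhat maps to (0, 0); tilting toward xhat by 0.1 rad gives roughly (0.1, 0).
#
#     xhat, yhat, zhat = np.eye(3)
#     a = np.array([[0., 0., 1.],
#                   [np.sin(0.1), 0., np.cos(0.1)]])
#     to_angle_space(a, xhat, yhat, zhat)   # ~ [[0., 0.], [0.1, 0.]]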
def project_image(axis=None,
lens=None,
data=None,
beam_pt=None,
beam_axis=None):
"""Given several lines of sight and an intensity per LOS, project an image on a plane perpendicular
to the average LOS axis. USES ANGLES FOUND IN A GENERAL WAY
Let the normalized, average LOS be :math:`\\hat{z}`, the lens position to be point :math:`\\vec{O}`, and a point in
question be :math:`\\vec{P}`. Let :math:`\\vec{a}=\\vec{P}-\\vec{O}`.
Parameters
----------
axis : array (nchan, 3)
Normalized axis vectors defining LOS
lens : array (3)
Common location for all LOS (aperture or lens)
data : array (nchan)
Data to be projected
beam_pt : array (3)
A point on the beam centerline
beam_axis : array (3)
Vector along beam centerline
Returns
-------
x1 : array (100)
Relative coordinates for grid_data (angle (deg) perpendicular to average LOS axis)
x2 : array (101)
Second set of relative coordinates for grid_data (angle (deg) perpendicular to average LOS axis)
grid_data : array (100, 101)
Data interpolated onto a uniform grid on a plane perpendicular to the average LOS axis
beam_pt1 : array (2)
Beam coordinates in 2-angle space for 1st beam point
beam_pt2 : array (2)
Beam coordinates in 2-angle space for 2nd beam point
Todo
----
    * Choose xhat or yhat to consistently be the one that is more parallel to the beam axis. Could do by crossing
beam_axis instead of any_vec.
"""
# Average LOS axis
zhat = axis.mean(0)
zhat = zhat / np.linalg.norm(zhat) # (3)
# # Find any vector perp to zhat (by crossing w/ any non-colinear vector) to define the plane
# any_vec = np.array([zhat[0] + 5., zhat[1], zhat[2]]) # 5. is arbitrary
# xhat = np.cross(zhat, any_vec)
# xhat = xhat / np.linalg.norm(xhat)
#
# # Find second plane vector perp to first
# yhat = np.cross(zhat, xhat) # (3)
# Get unit vector perp to beam
    yhat = np.cross(zhat, beam_axis)
    yhat = yhat / np.linalg.norm(yhat)  # normalize the cross product itself; dividing by |beam_axis| is only correct when zhat is perpendicular to the beam
# Get unit vector along beam
xhat = np.cross(yhat, zhat)
# Get angles along xhat and yhat for LOS
angs = to_angle_space(axis, xhat, yhat, zhat) # (nchan, 2)
# Interpolate data onto uniform grid of angles
n1d = 100 # no. of grid points in each direction
x1 = np.linspace(angs[:, 0].min(), angs[:, 0].max(), num = n1d)
x2 = np.linspace(angs[:, 1].min(), angs[:, 1].max(), num = n1d + 1)
x1_grid, x2_grid = np.meshgrid(x1, x2, indexing='ij')
grid_data = interpolate.griddata(np.array([angs[:, 0], angs[:, 1]]).T, data, (x1_grid, x2_grid), fill_value=0.)
# Pick two points on the beam centerline
beam_vecs = np.zeros((2, 3))
beam_vecs[0, :] = beam_pt - lens
beam_vecs[1, :] = beam_pt + beam_axis * 10. - lens
# Get angles along xhat and yhat for beam points
beam_angs = to_angle_space(beam_vecs, xhat, yhat, zhat) # (2, 2) = (2-pts, 2-space)
# Define beam centerline in angle coordinates and move beam points to edge of angle grid
beam_axis = np.squeeze(np.diff(beam_angs, axis=0))
if beam_axis[0] != 0.:
t1 = (x1.min() - beam_angs[0, 0]) / beam_axis[0]
t2 = (x1.max() - beam_angs[0, 0]) / beam_axis[0]
else:
t1 = -np.inf
t2 = np.inf
if beam_axis[1] != 0.:
t1_b = (x2.min() - beam_angs[0, 1]) / beam_axis[1]
t2_b = (x2.max() - beam_angs[0, 1]) / beam_axis[1]
t1 = np.max([t1, t1_b])
t2 = np.min([t2, t2_b])
beam_angs[0, :] = [beam_angs[0, 0] + beam_axis[0] * t1, beam_angs[0, 1] + beam_axis[1] * t1]
beam_angs[1, :] = [beam_angs[1, 0] + beam_axis[0] * t2, beam_angs[1, 1] + beam_axis[1] * t2]
return x1, x2, grid_data, beam_angs[0, :], beam_angs[1, :]
def intersect_line_plane(plane_pt1, plane_pt2, plane_pt3, line_pt, line_axis):
'''Calculate the intersection location between line and plane
    Parameters
    ----------
    plane_pt1, plane_pt2, plane_pt3 : array (3)
        Three non-colinear points defining the plane
    line_pt : array (3)
        A point on the line
    line_axis : array (3)
        Direction vector of the line
    Returns
    -------
    list or None
        Two element list: point and axis of the line itself (ie the line lies in the plane)
        Three element list: coordinates of the intersection point
        None: the line does not intersect the plane
Notes
-----
Not implemented for multiple lines or planes
* For testing for cases in line-plane intersection, see [1]_
* For the cases where the line-plane intersection is a point, see [2]_
References
----------
.. [1] https://en.wikipedia.org/wiki/Line%E2%80%93plane_intersection#Algebraic_form
.. [2] http://mathworld.wolfram.com/Line-PlaneIntersection.html
'''
    # Shorthand for the determinant formulas below
    X1 = plane_pt1
    X2 = plane_pt2
    X3 = plane_pt3
    X4 = line_pt
# Vector normal to plane
plane_norm_vec = np.cross(X1 - X2, X3 - X2)
plane_norm_vec /= np.linalg.norm(plane_norm_vec)
# Avoid using same point on line and plane. Just move further along line (arbitrarily let t = 1.)
if np.array_equal(X1, X4):
X4 = X4 + line_axis * 1.
# Test for different cases.
# Since vec1, plane_norm_vec, and line_axis are all normalized, the following dot products are [0, 1]. So can
# use a tolerance instead of comparing to zero.
tol = 1e-15
vec1 = (X4 - X1)
vec1 /= np.linalg.norm(vec1)
if np.abs(np.dot(line_axis, plane_norm_vec)) < tol:
# Line and plane are parallel
if np.abs(np.dot(vec1, plane_norm_vec)) < tol:
# Line is in the plane. Intersection is the line itself
return [X4, line_axis]
else:
# Line does not intersect plane
return None
else:
# Intersection is a point
mat1 = np.ones((4, 4), dtype=float)
mat1[1:4, 0] = X1
mat1[1:4, 1] = X2
mat1[1:4, 2] = X3
mat1[1:4, 3] = X4
mat2 = np.copy(mat1)
mat2[0, 3] = 0.
mat2[1:4, 3] = line_axis
t = -np.linalg.det(mat1) / np.linalg.det(mat2)
x = X4[0] + line_axis[0] * t
y = X4[1] + line_axis[1] * t
z = X4[2] + line_axis[2] * t
return [x, y, z]
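# Example (assumed inputs): a vertical line through the origin meets the
# z = 5 plane at a single point.
#
#     p1 = np.array([0., 0., 5.]); p2 = np.array([1., 0., 5.]); p3 = np.array([0., 1., 5.])
#     intersect_line_plane(p1, p2, p3, np.array([0., 0., 0.]), np.array([0., 0., 1.]))
#     # -> [0.0, 0.0, 5.0]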
def load_dict_from_hdf5(h5_filepath):
"""
Load h5 file as a dict
"""
def recursively_load_dict_contents_from_group(h5_obj, path):
"""
Recursively load a dict from h5 file
"""
ans = {}
for key, item in h5_obj[path].items():
if isinstance(item, h5py._hl.dataset.Dataset):
            ans[key] = item[()]  # Dataset.value was removed in h5py 3; index with () instead
elif isinstance(item, h5py._hl.group.Group):
ans[key] = recursively_load_dict_contents_from_group(h5_obj, path + key + '/')
return ans
with h5py.File(h5_filepath, 'r') as h5_obj:
return recursively_load_dict_contents_from_group(h5_obj, '/')
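# e.g. (hypothetical path):
#     spec = load_dict_from_hdf5('/results/runid_spectra.h5')
#     spec['lambda'], spec['nchan']   # datasets come back as plain numpy values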
def find_lenses(nchan, lens_loc):
"""Find locations for unique lenses in fidasim run
Parameters
----------
nchan : int
Total number of spectral channels (lines of sight)
lens_loc : 2D array
Cartesian coords of all lenses in machine coords, (nchan, 3)
Returns
-------
    uniq_lens_indices : list
        Indices to locate spectra for each unique lens location
    nlenses : int
        Number of unique lens locations
"""
    nchan = lens_loc.shape[0]  # derived from the array itself; the nchan argument is effectively unused
chan = np.arange(nchan)
lens_list = [tuple(lens_loc[i,:]) for i in range(nchan)]
lens_set = set(lens_list)
uniq_lens_indices = []
for lens in lens_set:
wl = np.array([l == lens for l in lens_list])
lens_ind = chan[wl]
uniq_lens_indices.append(lens_ind)
return uniq_lens_indices, len(uniq_lens_indices)
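# Example (hypothetical coordinates): channels 0 and 1 share a lens, channel 2
# has its own, so two groups come back (group order follows set iteration and
# is not guaranteed):
#
#     locs = np.array([[0., 0., 0.], [0., 0., 0.], [100., 0., 0.]])
#     groups, n = find_lenses(3, locs)   # n == 2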
class Spectra:
""" Spectra object that contains plot methods and parameters"""
def __init__(self, nml):
result_dir = nml["result_dir"]
runid = nml["runid"]
spec_file = os.path.join(result_dir, runid+'_spectra.h5')
geo_file = nml["geometry_file"]
self._has_spectra = os.path.isfile(spec_file)
self._has_geo = os.path.isfile(geo_file)
if self._has_spectra:
print('Loading spectra')
spec = load_dict_from_hdf5(spec_file)
self.lam = spec['lambda']
self.nchan = spec['nchan']
self.channels_spectra = collections.OrderedDict(('Channel ' + str(i + 1), i) for i in range(self.nchan))
self.dlam = np.abs(self.lam[1] - self.lam[0])
# Availability booleans
self.has_bes = ('full' in spec)
self.has_fida = ('fida' in spec)
self.has_brems = ('brems' in spec)
# Spectra frame variables (with initial values)
self.wl_min_spectra = tk.StringVar(value = str(np.min(self.lam)))
self.wl_max_spectra = tk.StringVar(value = str(np.max(self.lam)))
self.chan_spectra = tk.StringVar(value = 'Channel 1')
self.bes_on_spectra = tk.BooleanVar(value = self.has_bes)
self.fida_on_spectra = tk.BooleanVar(value = self.has_fida)
self.brems_on_spectra = tk.BooleanVar(value = self.has_brems)
self.legend_on = tk.BooleanVar(value = True)
# Imaging frame variables (with initial values)
self.wl_min_imaging = tk.StringVar(value = str(np.min(self.lam)))
self.wl_max_imaging = tk.StringVar(value = str(np.max(self.lam)))
self.full_on_imaging = tk.BooleanVar(value = self.has_bes)
self.half_on_imaging = tk.BooleanVar(value = self.has_bes)
self.third_on_imaging = tk.BooleanVar(value = self.has_bes)
self.halo_on_imaging = tk.BooleanVar(value = self.has_bes)
self.fida_on_imaging = tk.BooleanVar(value = self.has_fida)
self.brems_on_imaging = tk.BooleanVar(value = self.has_brems)
# self.projection_dist = tk.StringVar(value = 100.)
if self.has_brems:
self.brems = spec['brems']
if self.has_fida:
self.fida = spec['fida']
if self.has_bes:
self.full = spec['full']
self.half = spec['half']
self.third = spec['third']
self.halo = spec['halo']
if self._has_geo:
print('Loading geometry')
geo = load_dict_from_hdf5(geo_file)
self.lens_loc = geo['spec']['lens'] # (nchan, 3)
self.lens_axis = geo['spec']['axis'] # (nchan, 3)
self.beam_src = geo['nbi']['src']
self.beam_axis = geo['nbi']['axis']
self.uniq_lens_indeces, nlenses = find_lenses(self.nchan, self.lens_loc)
self.lenses = collections.OrderedDict(('Lens ' + str(i + 1), i) for i in range(nlenses))
self.lens = tk.StringVar(value = 'Lens 1')
# for i in range(nlenses):
# print('Lens {}: {}'.format(i + 1, self.lens_loc[self.uniq_lens_indeces[i][0], :]))
else:
print('No geometry file found')
else:
print('No Spectra File Found')
def plot_spectra(self, fig, canvas):
if self._has_spectra:
ch = self.channels_spectra[self.chan_spectra.get()]
lam = self.lam
fig.clf()
ax = fig.add_subplot(111)
if self.brems_on_spectra.get():
if self.has_brems:
ax.plot(lam, self.brems[ch, :], label = 'Brems')
else:
print('No brems spectra available')
if self.bes_on_spectra.get():
if self.has_bes:
ax.plot(lam, self.full[ch, :], label = 'Full')
ax.plot(lam, self.half[ch, :], label = 'Half')
ax.plot(lam, self.third[ch, :], label = 'Third')
ax.plot(lam, self.halo[ch, :], label = 'Halo')
else:
print('No beam spectra available')
if self.fida_on_spectra.get():
if self.has_fida:
ax.plot(lam, self.fida[ch, :], label = 'Fida')
else:
print('No FIDA spectra available')
if self.brems_on_spectra.get() or self.fida_on_spectra.get() or self.bes_on_spectra.get():
if self.legend_on.get():
ax.legend()
ax.set_yscale('log')
ax.set_xlabel('Wavelength [nm]')
ax.set_ylabel('$Ph\ /\ (s\ nm\ sr\ m^2)$')
ax.set_title(self.chan_spectra.get())
ax.set_xlim([float(self.wl_min_spectra.get()), float(self.wl_max_spectra.get())])
canvas.show()
else:
print('SPECTRA: No Spectra Selected')
else:
print('No Spectra File Found')
def plot_intensity(self, fig, canvas):
if self._has_spectra:
w1 = (self.lam >= float(self.wl_min_spectra.get()))
w2 = (self.lam <= float(self.wl_max_spectra.get()))
w = np.logical_and(w1, w2)
intens = integrate.simps(self.fida[:, w], x = self.lam[w], axis = 1)
ch = range(1, len(intens) + 1)
fig.clf()
ax = fig.add_subplot(111)
ax.plot(ch, intens)
ax.set_title('FIDA Intensity vs. Channel')
ax.set_ylabel('$Ph\ /\ (s\ sr\ m^2)$')
ax.set_xlabel('Channel Number')
ax.set_yscale('log')
canvas.show()
else: print('No Spectra File Found')
def plot_spec_image(self, fig, canvas):
"""Plot 2D contour of line-integrated spectra excluding brems
"""
torf = lambda T: 1. if T else 0.
lens = self.lenses[self.lens.get()] # this lens index (0 to nlenses-1)
ch = self.uniq_lens_indeces[lens] # (this_nchan), indeces for this lens
full_on = self.full_on_imaging.get()
half_on = self.half_on_imaging.get()
third_on = self.third_on_imaging.get()
halo_on = self.halo_on_imaging.get()
fida_on = self.fida_on_imaging.get()
fig.clf()
ax = fig.add_subplot(111)
ax.axis('equal')
if self.has_bes:
full = self.full[ch, :]
half = self.half[ch, :]
third = self.third[ch, :]
halo = self.halo[ch, :]
else:
full = 0.
half = 0.
third = 0.
halo = 0.
if full_on or half_on or third_on or halo_on:
print('No beam spectra available')
if self.has_fida:
fida = self.fida[ch, :]
else:
fida = 0.
if fida_on:
print('No FIDA spectra available')
if (fida_on) or (full_on) or (half_on) or (third_on) or (halo_on):
spec = full * torf(full_on) + half * torf(half_on) + third * torf(third_on) + \
halo * torf(halo_on) + fida * torf(fida_on)
# Integrate over wavelengths
w = (self.lam >= float(self.wl_min_imaging.get())) & (self.lam <= float(self.wl_max_imaging.get()))
spec = integrate.simps(spec[:, w], x = self.lam[w], axis = 1) # (this_nchan)
# Get all LOS vectors for this lens
lens_axis = self.lens_axis[ch, :] # (this_nchan, 3), all LOS axes for this lens
lens_loc = self.lens_loc[ch[0], :] # (3), same for all in ch
# Project all LOS data onto 2D grid perpendicular to average LOS, a distance projection_dist from the lens
x1, x2, grid_spec, beam_pt1, beam_pt2 = project_image(axis=lens_axis,
lens=lens_loc,
data=spec,
beam_pt=self.beam_src,
beam_axis=self.beam_axis)
# Plot contour
c = ax.contourf(np.degrees(x1), np.degrees(x2), grid_spec.T, 50)
cb = fig.colorbar(c)
cb.ax.set_ylabel('[$Ph\ /\ (s\ sr\ m^2)$]')
ax.set_title('Intensity\nLens at [{:4.0f},{:4.0f},{:4.0f}]'.format(lens_loc[0], lens_loc[1], lens_loc[2]))
ax.set_xlabel('X1 [deg.]')
ax.set_ylabel('X2 [deg.]')
# Overplot beam centerline
# beam_pt1 = np.degrees(beam_pt1)
# beam_pt2 = np.degrees(beam_pt2)
# ax.plot([beam_pt1[0], beam_pt2[0]], [beam_pt1[1], beam_pt2[1]], color = 'magenta')
canvas.show()
else:
print('No spectra selected to plot')
def plot_brems_image(self, fig, canvas):
"""Plot 2D contour of line-integrated brems
"""
lens = self.lenses[self.lens.get()] # this lens index (0 to nlenses-1)
ch = self.uniq_lens_indeces[lens] # (this_nchan), indeces for this lens
fig.clf()
ax = fig.add_subplot(111)
ax.axis('equal')
if self.has_brems:
brems = self.brems[ch, :]
# Integrate over wavelengths
w = (self.lam >= float(self.wl_min_imaging.get())) & (self.lam <= float(self.wl_max_imaging.get()))
spec = integrate.simps(brems[:, w], x = self.lam[w], axis = 1) # (this_nchan)
lens_axis = self.lens_axis[ch, :] # (this_nchan, 3), all LOS axes for this lens
lens_loc = self.lens_loc[ch[0], :] # (3), same for all in ch (for TAE data)
            # project_image() takes keyword arguments and already returns the beam
            # endpoints clipped to the grid edges (the old positional call used the
            # removed projection_dist variable)
            x1, x2, grid_spec, beam_pt1, beam_pt2 = project_image(axis=lens_axis,
                                                                  lens=lens_loc,
                                                                  data=spec,
                                                                  beam_pt=self.beam_src,
                                                                  beam_axis=self.beam_axis)
            # Plot contour (angles in degrees, consistent with plot_spec_image)
            c = ax.contourf(np.degrees(x1), np.degrees(x2), grid_spec.T, 50)
            cb = fig.colorbar(c)
            cb.ax.set_ylabel('[$Ph\ /\ (s\ sr\ m^2)$]')
            ax.set_title('Intensity\nLens at [{:4.0f},{:4.0f},{:4.0f}]'.format(lens_loc[0], lens_loc[1], lens_loc[2]))
            ax.set_xlabel('X1 [deg.]')
            ax.set_ylabel('X2 [deg.]')
            # Overplot beam centerline
            beam_pt1 = np.degrees(beam_pt1)
            beam_pt2 = np.degrees(beam_pt2)
            ax.plot([beam_pt1[0], beam_pt2[0]], [beam_pt1[1], beam_pt2[1]], color = 'magenta')
canvas.show()
else:
print('No brems spectra available')
def reset_wave_spectra(self):
self.wl_min_spectra.set(np.min(self.lam))
self.wl_max_spectra.set(np.max(self.lam))
def reset_wave_imaging(self):
self.wl_min_imaging.set(np.min(self.lam))
self.wl_max_imaging.set(np.max(self.lam))
class NPA:
""" NPA object that contains plot methods and parameters"""
def __init__(self, nml):
result_dir = nml["result_dir"]
runid = nml["runid"]
npa_file = os.path.join(result_dir, runid + '_npa.h5')
wght_file = os.path.join(result_dir, runid + '_npa_weights.h5')
neut_file = os.path.join(result_dir, runid + '_neutrals.h5')
geo_file = nml["geometry_file"]
self._has_npa = os.path.isfile(npa_file)
self._has_wght = os.path.isfile(wght_file)
self._has_neut = os.path.isfile(neut_file)
self._has_geo = os.path.isfile(geo_file)
if self._has_npa:
print('Loading NPA')
npa = load_dict_from_hdf5(npa_file)
self.npa_energy = npa['energy']
self.npa_flux = npa['flux']
self.nchan = npa['nchan']
else:
print('No NPA file found')
if self._has_wght:
print('Loading NPA weights')
wght = load_dict_from_hdf5(wght_file)
            self.w_energy = wght['energy']
            self.w_flux = wght['flux']
            self.nchan = wght['nchan']  # ensure nchan exists even when only the weights file is present
else:
print('No NPA weights file found')
if self._has_neut:
neut = load_dict_from_hdf5(neut_file)
self.dens = neut['fdens'].sum(0).sum(0) + neut['hdens'].sum(0).sum(0) + \
neut['tdens'].sum(0).sum(0) + neut['halodens'].sum(0).sum(0)
else:
print('No neutrals file found')
if (self._has_npa or self._has_wght):
            self.channels_npa = collections.OrderedDict(('Channel ' + str(i + 1), i) for i in range(self.nchan))
self.chan_npa = tk.StringVar(value = 'Channel 1')
def plot_neutral_birth(self, fig, canvas):
if self._has_npa:
fig.clf()
ax = fig.add_subplot(111)
ch = self.channels_npa[self.chan_npa.get()]
            if self._has_neut:
                # Note: relies on grid/LOS attributes (x_grid, y_grid, xlos, xlens, ylos, ylens)
                # that are not set anywhere in this class (see module Todo: "NPA needs work")
ax.plot(self.x_grid[0,:,:],self.y_grid[0,:,:],'k,')
ax.contour(self.x_grid[0,:,:],self.y_grid[0,:,:],self.dens,20)
ax.plot([self.xlos[ch],self.xlens[ch]],[self.ylos[ch],self.ylens[ch]],'k')
ax.set_title('Neutral Birth Position')
ax.set_xlim(min(self.x_grid[0,0,:]) ,max(self.x_grid[0,0,:]))
ax.set_ylim(min(self.y_grid[0,:,0]),max(self.y_grid[0,:,0]))
ax.set_xlabel('x [cm]')
ax.set_ylabel('y [cm]')
canvas.show()
else:
print('NPA: No file')
def plot_flux(self, fig, canvas):
if self._has_npa or self._has_wght:
fig.clf()
ax = fig.add_subplot(111)
ch = self.channels_npa[self.chan_npa.get()]
if self._has_npa:
ax.step(self.npa_energy,self.npa_flux[ch,:],label = 'MC Flux')
if self._has_wght:
ax.plot(self.w_energy,self.w_flux[ch,:],label = 'WF Flux')
ax.legend()
ax.set_title('Neutral Flux: '+self.chan_npa.get())
ax.set_ylabel('Flux')
ax.set_xlabel('Energy [keV]')
canvas.show()
else: print('No NPA file found')
class Weights:
""" Weights object that contains plot methods and parameters"""
def __init__(self,nml):
result_dir = nml["result_dir"]
runid = nml["runid"]
npa_wght_file = os.path.join(result_dir,runid+'_npa_weights.h5')
fida_wght_file = os.path.join(result_dir,runid+'_fida_weights.h5')
self._has_npa_wght = os.path.isfile(npa_wght_file)
self._has_fida_wght = os.path.isfile(fida_wght_file)
if self._has_fida_wght:
print('Loading FIDA weights')
fida = load_dict_from_hdf5(fida_wght_file)
self.f_energy = fida['energy']
self.f_pitch = fida['pitch']
self.lam = fida['lambda']
self.dlam = np.abs(self.lam[1] - self.lam[0])
self.wl_max = np.max(self.lam)
self.wl_min = np.min(self.lam)
self.f_rad = fida['radius']
self.f_wght = fida['weight']
self.f_chan = len(self.f_rad)
self.fida_chans = collections.OrderedDict(('Channel '+str(i+1),i) for i in range(0,self.f_chan))
else:
print('No FIDA weights found')
if self._has_npa_wght:
npa = load_dict_from_hdf5(npa_wght_file)
self.n_energy = npa['energy']
self.n_pitch = npa['pitch']
self.n_wght = npa['weight']
self.n_rad = npa['radius']
self.n_nchan = npa['nchan'] #len(self.n_rad)
self.npa_chans = collections.OrderedDict(('Channel ' + str(i + 1), i) for i in range(0, self.n_nchan))
self.lam_val = tk.DoubleVar(value = 655.0)
self.fida_chan = tk.StringVar(value = 'Channel 1')
self.npa_chan = tk.StringVar(value = 'Channel 1')
def plot_npa_weights(self,fig,canvas):
if self._has_npa_wght:
ch = self.npa_chans[self.npa_chan.get()]
fig.clf()
ax = fig.add_subplot(111)
c = ax.contourf(self.n_energy, self.n_pitch, self.n_wght[ch,:,:], 50)
fig.colorbar(c)
ax.set_title('NPA Weight')
ax.set_ylabel('Pitch')
ax.set_xlabel('Energy [keV]')
canvas.show()
def plot_fida_weights(self,fig,canvas):
if self._has_fida_wght:
ch = self.fida_chans[self.fida_chan.get()]
wl = float(self.lam_val.get())
ind = np.argmin(np.abs(self.lam-wl))
fig.clf()
ax = fig.add_subplot(111)
c = ax.contourf(self.f_energy,self.f_pitch,self.f_wght[ch,:,:,ind],30)
fig.colorbar(c)
ax.set_xlabel('Energy [keV]')
ax.set_ylabel('Pitch')
ax.set_title('FIDA Weight')
canvas.show()
class Neutrals:
""" Neutrals object that contains plot methods and parameters"""
def __init__(self, nml):
result_dir = nml["result_dir"]
runid = nml["runid"]
neut_file = os.path.join(result_dir,runid+'_neutrals.h5')
geo_file = nml["geometry_file"]
self._has_neut = os.path.isfile(neut_file)
self._has_geo = os.path.isfile(geo_file)
if self._has_geo:
print('Loading geometry')
geo = load_dict_from_hdf5(geo_file)
self.beam_name = geo['nbi']['name'].decode('UTF-8')
else:
print('No geometry file found')
if self._has_neut:
print('Loading neutrals')
neut = load_dict_from_hdf5(neut_file)
# All grids and gridded data to --> (nx, ny, nz)
self.fdens = neut['fdens'].sum(3).T # sum over energy state
self.hdens = neut['hdens'].sum(3).T
self.tdens = neut['tdens'].sum(3).T
self.halodens = neut['halodens'].sum(3).T
self.x_grid = neut['grid']['x_grid'].T # mach coords
self.y_grid = neut['grid']['y_grid'].T # mach coords
self.z_grid = neut['grid']['z_grid'].T # mach coords
self.nx = neut['grid']['nx']
self.ny = neut['grid']['ny']
self.nz = neut['grid']['nz']
# beam coords
self.x_grid_beam, self.y_grid_beam, self.z_grid_beam = np.meshgrid(neut['grid']['x'], neut['grid']['y'], neut['grid']['z'], indexing='ij')
# Are beam and machine coordinates the same?
self.beam_mach_same = np.array_equal(self.x_grid, self.x_grid_beam) and np.array_equal(self.y_grid, self.y_grid_beam) and np.array_equal(self.z_grid, self.z_grid_beam)
else:
print('No neutrals file found')
## Radio Buttons Variable
self.plot_type = tk.StringVar(value = 'XY')
## Checkbox Variables
self.use_mach_coords = tk.BooleanVar(value = False)
self.full_on_neutrals = tk.BooleanVar(value = True)
self.half_on_neutrals = tk.BooleanVar(value = True)
self.third_on_neutrals = tk.BooleanVar(value = True)
self.halo_on_neutrals = tk.BooleanVar(value = True)
self.transpose = tk.BooleanVar(value = False)
def plot_neutrals(self, fig, canvas):
full_on = self.full_on_neutrals.get()
half_on = self.half_on_neutrals.get()
third_on = self.third_on_neutrals.get()
halo_on = self.halo_on_neutrals.get()
torf = lambda T: 1 if T else 0
if self._has_neut:
if not (full_on or half_on or third_on or halo_on):
print('No neutrals selected to plot')
else:
fig.clf()
ax = fig.add_subplot(111)
pt = self.plot_type.get()
if pt == 'X':
if self.use_mach_coords.get() and not self.beam_mach_same:
# Use machine coords and they're not the same as beam coords (so must rebin)
ax.set_xlabel('X [cm]')
# Need to bin data onto mach regular grid before taking projections
fdens_hist = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(), bins = (self.nx, self.ny), weights=self.fdens.flatten())
fdens = fdens_hist[0]
# Histogram returns edges of shape (nx+1). Convert to centers
xedges = fdens_hist[1]
yedges = fdens_hist[2]
dx = xedges[1] - xedges[0]
x = xedges[0:-1] + dx / 2.
hdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(), bins = (xedges, yedges), weights=self.hdens.flatten())[0]
tdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(), bins = (xedges, yedges), weights=self.tdens.flatten())[0]
halodens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(), bins = (xedges, yedges), weights=self.halodens.flatten())[0]
# histogram2d sums weights, need mean
fdens = fdens.mean(1) / self.nz
hdens = hdens.mean(1) / self.nz
tdens = tdens.mean(1) / self.nz
halodens = halodens.mean(1) / self.nz
else:
# Use beam coords or beam and machine coords are the same
if self.use_mach_coords.get():
ax.set_xlabel('X [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$X = X_{beam}$ [cm]')
else:
ax.set_xlabel('$X_{beam}$ [cm]')
# Use data as is for beam coords or when coord systems are the same
x = self.x_grid_beam[:, 0, 0]
fdens = self.fdens.mean(1).mean(1)
hdens = self.hdens.mean(1).mean(1)
tdens = self.tdens.mean(1).mean(1)
halodens = self.halodens.mean(1).mean(1)
if full_on: ax.plot(x, fdens, label = 'Full')
if half_on: ax.plot(x, hdens, label = 'Half')
if third_on: ax.plot(x, tdens, label = 'Third')
if halo_on: ax.plot(x, halodens, label = 'Halo')
ax.legend()
ax.set_title('Neutral Density. NB {}'.format(self.beam_name))
ax.set_ylabel('Mean Density [$cm^{-3}$]')
canvas.show()
if pt == 'Y':
if self.use_mach_coords.get() and not self.beam_mach_same:
# Use machine coords and they're not the same as beam coords (so must rebin)
ax.set_xlabel('Y [cm]')
# Need to bin data onto mach regular grid before taking projections
fdens_hist = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(), bins = (self.nx, self.ny), weights=self.fdens.flatten())
fdens = fdens_hist[0]
# Histogram returns edges of shape (nx+1). Convert to centers
xedges = fdens_hist[1]
yedges = fdens_hist[2]
dx = yedges[1] - yedges[0]
x = yedges[0:-1] + dx / 2.
hdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(), bins = (xedges, yedges), weights=self.hdens.flatten())[0]
tdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(), bins = (xedges, yedges), weights=self.tdens.flatten())[0]
halodens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(), bins = (xedges, yedges), weights=self.halodens.flatten())[0]
# histogram2d sums weights, need mean
fdens = fdens.mean(0) / self.nz
hdens = hdens.mean(0) / self.nz
tdens = tdens.mean(0) / self.nz
halodens = halodens.mean(0) / self.nz
else:
# Use beam coords or beam and machine coords are the same
if self.use_mach_coords.get():
ax.set_xlabel('Y [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$Y = Y_{beam}$ [cm]')
else:
ax.set_xlabel('$Y_{beam}$ [cm]')
# Use data as is for beam coords or when coord systems are the same
x = self.y_grid_beam[0, :, 0]
fdens = self.fdens.mean(0).mean(1)
hdens = self.hdens.mean(0).mean(1)
tdens = self.tdens.mean(0).mean(1)
halodens = self.halodens.mean(0).mean(1)
if full_on: ax.plot(x, fdens, label = 'Full')
if half_on: ax.plot(x, hdens, label = 'Half')
if third_on: ax.plot(x, tdens, label = 'Third')
if halo_on: ax.plot(x, halodens, label = 'Halo')
ax.legend()
ax.set_title('Neutral Density. NB {}'.format(self.beam_name))
ax.set_ylabel('Mean Density [$cm^{-3}$]')
canvas.show()
if pt == 'Z':
if self.use_mach_coords.get() and not self.beam_mach_same:
# Use machine coords and they're not the same as beam coords (so must rebin)
ax.set_xlabel('Z [cm]')
# Need to bin data onto mach regular grid before taking projections
fdens_hist = np.histogram2d(self.x_grid.flatten(), self.z_grid.flatten(), bins = (self.nx, self.nz), weights=self.fdens.flatten())
fdens = fdens_hist[0]
# Histogram returns edges of shape (nx+1). Convert to centers
xedges = fdens_hist[1]
yedges = fdens_hist[2]
dx = yedges[1] - yedges[0]
x = yedges[0:-1] + dx / 2.
hdens = np.histogram2d(self.x_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.hdens.flatten())[0]
tdens = np.histogram2d(self.x_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.tdens.flatten())[0]
halodens = np.histogram2d(self.x_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.halodens.flatten())[0]
# histogram2d sums weights, need mean
fdens = fdens.mean(0) / self.ny
hdens = hdens.mean(0) / self.ny
tdens = tdens.mean(0) / self.ny
halodens = halodens.mean(0) / self.ny
else:
# Use beam coords or beam and machine coords are the same
if self.use_mach_coords.get():
ax.set_xlabel('Z [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$Z = Z_{beam}$ [cm]')
else:
ax.set_xlabel('$Z_{beam}$ [cm]')
# Use data as is for beam coords or when coord systems are the same
x = self.z_grid_beam[0, 0, :]
fdens = self.fdens.mean(0).mean(0)
hdens = self.hdens.mean(0).mean(0)
tdens = self.tdens.mean(0).mean(0)
halodens = self.halodens.mean(0).mean(0)
if full_on: ax.plot(x, fdens, label = 'Full')
if half_on: ax.plot(x, hdens, label = 'Half')
if third_on: ax.plot(x, tdens, label = 'Third')
if halo_on: ax.plot(x, halodens, label = 'Halo')
ax.legend()
ax.set_title('Neutral Density. NB {}'.format(self.beam_name))
ax.set_ylabel('Mean Density [$cm^{-3}$]')
canvas.show()
if pt == 'XY':
if self.use_mach_coords.get() and not self.beam_mach_same:
                    use_histogram = True  # the KDE branch below is experimental and currently disabled
                    if use_histogram:
###############################################
# MAINTAIN ORIGINAL NUMBER OF POINTS
###############################################
# Use machine coords and they're not the same as beam coords (so must rebin)
ax.set_xlabel('X [cm]')
ax.set_ylabel('Y [cm]')
# Need to bin data onto mach regular grid before taking projections
fdens_hist = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(),
bins = (self.nx, self.ny), weights=self.fdens.flatten())
fdens = fdens_hist[0]
# Histogram returns edges of shape (nx+1). Convert to centers
xedges = fdens_hist[1]
yedges = fdens_hist[2]
dx = xedges[1] - xedges[0]
dy = yedges[1] - yedges[0]
x = xedges[0:-1] + dx / 2.
y = yedges[0:-1] + dy / 2.
hdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(),
bins = (xedges, yedges), weights=self.hdens.flatten())[0]
tdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(),
bins = (xedges, yedges), weights=self.tdens.flatten())[0]
halodens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(),
bins = (xedges, yedges), weights=self.halodens.flatten())[0]
# histogram2d sums weights, need mean
fdens = fdens / self.nz
hdens = hdens / self.nz
tdens = tdens / self.nz
halodens = halodens / self.nz
# print('original dx {}'.format((self.x_grid[1, 0, 0] - self.x_grid[0, 0, 0]) * (self.y_grid[0, 1, 0] - self.y_grid[0, 0, 0])))
# print('new dx {}'.format(dx * dy))
###############################################
# MAINTAIN ORIGINAL RESOLUTION
###############################################
# # Use machine coords and they're not the same as beam coords (so must rebin)
# ax.set_xlabel('X [cm]')
# ax.set_ylabel('Y [cm]')
#
# dx = np.abs(self.x_grid[1, 0, 0] - self.x_grid[0, 0, 0])
# dy = np.abs(self.y_grid[0, 1, 0] - self.y_grid[0, 0, 0])
# xmin, xmax = self.x_grid.min(), self.x_grid.max()
# ymin, ymax = self.y_grid.min(), self.y_grid.max()
# nx = np.floor((xmax - xmin) / dx) + 1
# ny = np.floor((ymax - ymin) / dy) + 1
# x = np.linspace(xmin, xmax, num=nx)
# y = np.linspace(ymin, ymax, num=ny)
# xedges = x - dx / 2.
# yedges = y - dx / 2.
# xedges = np.append(xedges, xedges[-1] + dx)
# yedges = np.append(yedges, yedges[-1] + dy)
#
# # Need to bin data onto mach regular grid before taking projections
# fdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(),
# bins = (xedges, yedges), weights=self.fdens.flatten())[0]
# hdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(),
# bins = (xedges, yedges), weights=self.hdens.flatten())[0]
# tdens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(),
# bins = (xedges, yedges), weights=self.tdens.flatten())[0]
# halodens = np.histogram2d(self.x_grid.flatten(), self.y_grid.flatten(),
# bins = (xedges, yedges), weights=self.halodens.flatten())[0]
#
# # histogram2d sums weights, need mean
# fdens = fdens / self.nz
# hdens = hdens / self.nz
# tdens = tdens / self.nz
# halodens = halodens / self.nz
else:
                        # Use KDE (work in progress: the dataset/weights built below do not
                        # yet match gaussian_kde's (ndim, ndata) contract, so this path is
                        # kept disabled above)
x = np.linspace(self.x_grid.min(), self.x_grid.max(), self.nx)
y = np.linspace(self.y_grid.min(), self.y_grid.max(), self.ny)
xx, yy = np.meshgrid(x, y)
fdens = np.zeros((3, np.max(self.fdens.shape))) # make (ndims, ndata)
fdens[0, 0:self.nx] = self.fdens[:, 0, 0]
fdens[1, 0:self.ny] = self.fdens[0, :, 0]
fdens[2, 0:self.nz] = self.fdens[0, 0, :]
pdf = gaussian_kde(fdens, weights=fdens)
fdens = pdf((np.ravel(xx), np.ravel(yy)))
fdens = np.reshape(fdens, xx.shape)
else:
# Use data as is for beam coords or when coord systems are the same
x = self.x_grid_beam[:, 0, 0]
y = self.y_grid_beam[0, :, 0]
fdens = self.fdens.mean(2)
hdens = self.hdens.mean(2)
tdens = self.tdens.mean(2)
halodens = self.halodens.mean(2)
if self.transpose.get():
if self.use_mach_coords.get():
ax.set_xlabel('Y [cm]')
ax.set_ylabel('X [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$Y = Y_{beam}$ [cm]')
ax.set_ylabel('$X = X_{beam}$ [cm]')
else:
ax.set_xlabel('$Y_{beam}$ [cm]')
ax.set_ylabel('$X_{beam}$ [cm]')
else:
if self.use_mach_coords.get():
ax.set_xlabel('X [cm]')
ax.set_ylabel('Y [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$X = X_{beam}$ [cm]')
ax.set_ylabel('$Y = Y_{beam}$ [cm]')
else:
ax.set_xlabel('$X_{beam}$ [cm]')
ax.set_ylabel('$Y_{beam}$ [cm]')
dens = fdens * torf(full_on) + hdens * torf(half_on) + tdens * torf(third_on) + halodens * torf(halo_on)
ax.axis('equal')
if self.transpose.get():
c = ax.contourf(y, x, dens, 50)
else:
c = ax.contourf(x, y, dens.T, 50)
cb = fig.colorbar(c)
cb.ax.set_ylabel('[$cm^{-3}$]')
ax.set_title('Mean Neutral Density. {}'.format(self.beam_name))
canvas.show()
if pt == 'XZ':
if self.use_mach_coords.get() and not self.beam_mach_same:
# Use machine coords and they're not the same as beam coords (so must rebin)
ax.set_xlabel('X [cm]')
ax.set_ylabel('Z [cm]')
# Need to bin data onto mach regular grid before taking projections
fdens_hist = np.histogram2d(self.x_grid.flatten(), self.z_grid.flatten(), bins = (self.nx, self.nz), weights=self.fdens.flatten())
fdens = fdens_hist[0]
# Histogram returns edges of shape (nx+1). Convert to centers
xedges = fdens_hist[1]
yedges = fdens_hist[2]
dx = xedges[1] - xedges[0]
dy = yedges[1] - yedges[0]
x = xedges[0:-1] + dx / 2.
y = yedges[0:-1] + dy / 2.
# x, y = np.meshgrid(x, y, indexing='ij')
hdens = np.histogram2d(self.x_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.hdens.flatten())[0]
tdens = np.histogram2d(self.x_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.tdens.flatten())[0]
halodens = np.histogram2d(self.x_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.halodens.flatten())[0]
# histogram2d sums weights, need mean
fdens = fdens / self.ny
hdens = hdens / self.ny
tdens = tdens / self.ny
halodens = halodens / self.ny
else:
# Use beam coords or beam and machine coords are the same
x = self.x_grid_beam[:, 0, 0]
y = self.z_grid_beam[0, 0, :]
fdens = self.fdens.mean(1)
hdens = self.hdens.mean(1)
tdens = self.tdens.mean(1)
halodens = self.halodens.mean(1)
if self.transpose.get():
if self.use_mach_coords.get():
ax.set_xlabel('Z [cm]')
ax.set_ylabel('X [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$Z = Z_{beam}$ [cm]')
ax.set_ylabel('$X = X_{beam}$ [cm]')
else:
ax.set_xlabel('$Z_{beam}$ [cm]')
ax.set_ylabel('$X_{beam}$ [cm]')
else:
if self.use_mach_coords.get():
ax.set_xlabel('X [cm]')
ax.set_ylabel('Z [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$X = X_{beam}$ [cm]')
ax.set_ylabel('$Z = Z_{beam}$ [cm]')
else:
ax.set_xlabel('$X_{beam}$ [cm]')
ax.set_ylabel('$Z_{beam}$ [cm]')
dens = fdens * torf(full_on) + hdens * torf(half_on) + tdens * torf(third_on) + halodens * torf(halo_on)
ax.axis('equal')
if self.transpose.get():
c = ax.contourf(y, x, dens, 50)
else:
c = ax.contourf(x, y, dens.T, 50)
cb = fig.colorbar(c)
cb.ax.set_ylabel('[$cm^{-3}$]')
ax.set_title('Mean Neutral Density. NB {}'.format(self.beam_name))
canvas.show()
if pt == 'YZ':
if self.use_mach_coords.get() and not self.beam_mach_same:
# Use machine coords and they're not the same as beam coords
ax.set_xlabel('Y [cm]')
ax.set_ylabel('Z [cm]')
# Need to bin data onto mach regular grid before taking projections
fdens_hist = np.histogram2d(self.y_grid.flatten(), self.z_grid.flatten(), bins = (self.ny, self.nz), weights=self.fdens.flatten())
fdens = fdens_hist[0]
# Histogram returns edges of shape (nx+1). Convert to centers
xedges = fdens_hist[1]
yedges = fdens_hist[2]
dx = xedges[1] - xedges[0]
dy = yedges[1] - yedges[0]
x = xedges[0:-1] + dx / 2.
y = yedges[0:-1] + dy / 2.
# x, y = np.meshgrid(x, y, indexing='ij')
hdens = np.histogram2d(self.y_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.hdens.flatten())[0]
tdens = np.histogram2d(self.y_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.tdens.flatten())[0]
halodens = np.histogram2d(self.y_grid.flatten(), self.z_grid.flatten(), bins = (xedges, yedges), weights=self.halodens.flatten())[0]
# histogram2d sums weights, need mean
fdens = fdens / self.nx
hdens = hdens / self.nx
tdens = tdens / self.nx
halodens = halodens / self.nx
else:
# Use beam coords or beam and machine coords are the same
x = self.y_grid_beam[0, :, 0]
y = self.z_grid_beam[0, 0, :]
fdens = self.fdens.mean(0)
hdens = self.hdens.mean(0)
tdens = self.tdens.mean(0)
halodens = self.halodens.mean(0)
if self.transpose.get():
if self.use_mach_coords.get():
ax.set_xlabel('Z [cm]')
ax.set_ylabel('Y [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$Z = Z_{beam}$ [cm]')
ax.set_ylabel('$Y = Y_{beam}$ [cm]')
else:
ax.set_xlabel('$Z_{beam}$ [cm]')
ax.set_ylabel('$Y_{beam}$ [cm]')
else:
if self.use_mach_coords.get():
ax.set_xlabel('Y [cm]')
ax.set_ylabel('Z [cm]')
elif self.beam_mach_same:
ax.set_xlabel('$Y = Y_{beam}$ [cm]')
ax.set_ylabel('$Z = Z_{beam}$ [cm]')
else:
ax.set_xlabel('$Y_{beam}$ [cm]')
ax.set_ylabel('$Z_{beam}$ [cm]')
dens = fdens * torf(full_on) + hdens * torf(half_on) + tdens * torf(third_on) + halodens * torf(halo_on)
ax.axis('equal')
if self.transpose.get():
c = ax.contourf(y, x, dens, 50)
else:
c = ax.contourf(x, y, dens.T, 50)
cb = fig.colorbar(c)
cb.ax.set_ylabel('[$cm^{-3}$]')
ax.set_title('Mean Neutral Density. NB {}'.format(self.beam_name))
canvas.show()
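# The machine-coordinate branches above repeat one histogram2d pattern per
# plane. A possible refactor (a sketch only, not wired into the class) would
# centralize the rebinning:
def _rebin_mean(c1, c2, dens, nbins1, nbins2, nother):
    """Bin scattered (c1, c2) points onto a regular grid, weighting by dens,
    and return bin centers plus the density averaged over the collapsed axis."""
    hist, e1, e2 = np.histogram2d(c1.flatten(), c2.flatten(),
                                  bins=(nbins1, nbins2), weights=dens.flatten())
    x = e1[:-1] + (e1[1] - e1[0]) / 2.  # edges -> centers
    y = e2[:-1] + (e2[1] - e2[0]) / 2.
    return x, y, hist / nother          # histogram2d sums weights; divide for the mean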
class Viewer:
"""Class that contains FIDAsim result viewer window"""
def __init__(self, parent):
self.load_namelist()
parent.title('FIDAviewer. {}'.format(self.namelistfile))
# Make MenuBar
self.MenuBar = tk.Menu(parent)
parent.config(menu = self.MenuBar)
self.file = tk.Menu(self.MenuBar, tearoff = False)
self.file.add_command(label = 'Load Run', command = (lambda: self.load_namelist()))
self.file.add_command(label = 'Quit', command = (lambda: sys.exit()))
self.MenuBar.add_cascade(label = 'File', menu = self.file, underline = 0)
# Make Notebook
self.nb = ttk.Notebook(parent)
self.spectra_frame = ttk.Frame(self.nb)
self.npa_frame = ttk.Frame(self.nb)
self.neutrals_frame = ttk.Frame(self.nb)
self.weights_frame = ttk.Frame(self.nb)
self.imaging_frame = ttk.Frame(self.nb)
self.nb.add(self.spectra_frame, text = 'Spectra')
self.nb.add(self.npa_frame ,text = 'NPA')
self.nb.add(self.neutrals_frame, text = 'Neutrals')
self.nb.add(self.weights_frame, text = 'Weights')
self.nb.add(self.imaging_frame, text = 'Imaging')
self.nb.pack(side = tk.LEFT , expand = tk.Y, fill = tk.BOTH)
self.fig = plt.Figure(figsize = (6, 5), dpi = 100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.fig, master = parent)
self.canvas.get_tk_widget().pack(side = tk.RIGHT)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, parent)
self.toolbar.update()
self.canvas._tkcanvas.pack(side = tk.TOP, expand = tk.Y, fill = tk.BOTH)
# Spectra Frame
if self.spec._has_spectra:
ttk.Combobox(self.spectra_frame, textvariable = self.spec.chan_spectra,
values = list(self.spec.channels_spectra.keys())).pack()
ttk.Checkbutton(self.spectra_frame, text = 'Hide BES', variable = self.spec.bes_on_spectra,
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.spectra_frame,text = 'Hide FIDA', variable = self.spec.fida_on_spectra,
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.spectra_frame,text = 'Hide Bremsstrahlung', variable = self.spec.brems_on_spectra,\
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.spectra_frame, text = 'Hide Legend', variable = self.spec.legend_on,\
onvalue = False, offvalue = True).pack()
ttk.Label(self.spectra_frame, text = 'Wavelength Min (nm)').pack()
ttk.Entry(self.spectra_frame, textvariable = self.spec.wl_min_spectra, state = tk.NORMAL, width = 10).pack()
ttk.Label(self.spectra_frame, text = 'Wavelength Max (nm)').pack()
ttk.Entry(self.spectra_frame, textvariable = self.spec.wl_max_spectra, state = tk.NORMAL, width = 10).pack()
ttk.Button(self.spectra_frame, text = 'Reset Wavelength',\
command = (lambda: self.spec.reset_wave_spectra())).pack(side = tk.TOP)
ttk.Button(self.spectra_frame, text = 'Plot Spectra',\
command = (lambda: self.spec.plot_spectra(self.fig, self.canvas))).pack(side = tk.TOP, expand = tk.Y, fill = tk.BOTH)
ttk.Button(self.spectra_frame,text = 'Plot Intensity',\
command = (lambda: self.spec.plot_intensity(self.fig, self.canvas))).pack(side = tk.TOP, expand = tk.Y, fill = tk.BOTH)
else:
ttk.Label(self.spectra_frame, text = '\n\nNo spectral data found').pack()
# NPA Frame
if self.npa._has_npa:
ttk.Combobox(self.npa_frame, textvariable = self.npa.chan_npa, values = tuple(self.npa.channels_npa.keys())).pack()
ttk.Button(self.npa_frame, text = 'Plot Neutral Birth',\
command = (lambda: self.npa.plot_neutral_birth(self.fig, self.canvas))).pack(side = tk.TOP, expand = tk.Y,fill = tk.BOTH)
ttk.Button(self.npa_frame, text = 'Plot Flux',\
command = (lambda: self.npa.plot_flux(self.fig, self.canvas))).pack(side = tk.TOP,expand = tk.Y, fill = tk.BOTH)
else:
ttk.Label(self.npa_frame, text = '\n\nNo NPA data found').pack()
# Neutrals Frame
ttk.Radiobutton(self.neutrals_frame, text = 'Density vs X', variable = self.neut.plot_type, value = 'X').pack()
ttk.Radiobutton(self.neutrals_frame, text = 'Density vs Y', variable = self.neut.plot_type, value = 'Y').pack()
ttk.Radiobutton(self.neutrals_frame, text = 'Density vs Z', variable = self.neut.plot_type, value = 'Z').pack()
ttk.Radiobutton(self.neutrals_frame, text = 'Contour XY', variable = self.neut.plot_type, value = 'XY').pack()
ttk.Radiobutton(self.neutrals_frame, text = 'Contour XZ', variable = self.neut.plot_type, value = 'XZ').pack()
ttk.Radiobutton(self.neutrals_frame, text = 'Contour YZ', variable = self.neut.plot_type, value = 'YZ').pack()
ttk.Checkbutton(self.neutrals_frame, text = 'Use Machine Coordinates', variable = self.neut.use_mach_coords,\
onvalue = True, offvalue = False).pack()
ttk.Checkbutton(self.neutrals_frame, text = 'Hide Full', variable = self.neut.full_on_neutrals,\
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.neutrals_frame, text = 'Hide Half', variable = self.neut.half_on_neutrals,\
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.neutrals_frame, text = 'Hide Third', variable = self.neut.third_on_neutrals,\
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.neutrals_frame, text = 'Hide Halo', variable = self.neut.halo_on_neutrals,\
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.neutrals_frame, text = 'Transpose', variable = self.neut.transpose,\
onvalue = True, offvalue = False).pack()
ttk.Button(self.neutrals_frame, text = 'Plot',\
command = (lambda: self.neut.plot_neutrals(self.fig, self.canvas))).pack(expand = tk.Y, fill = tk.BOTH)
# Weights Frame
if self.wght._has_fida_wght:
ttk.Combobox(self.weights_frame,textvariable = self.wght.fida_chan,\
values = tuple(self.wght.fida_chans.keys())).pack()
tk.Scale(self.weights_frame,orient = tk.HORIZONTAL, length = 200,\
from_ = self.wght.wl_min, to = self.wght.wl_max, resolution = self.wght.dlam, variable = self.wght.lam_val).pack()
ttk.Button(self.weights_frame,text = 'Plot FIDA Weights',\
command = (lambda: self.wght.plot_fida_weights(self.fig,self.canvas))).pack(side = tk.TOP,expand = tk.Y,fill = tk.BOTH)
else:
ttk.Label(self.weights_frame, text = '\n\nNo FIDA weight data found').pack()
if self.wght._has_npa_wght:
ttk.Combobox(self.weights_frame,textvariable = self.wght.npa_chan,\
values = tuple(self.wght.npa_chans.keys())).pack()
ttk.Button(self.weights_frame,text = 'Plot NPA Weights',\
command = (lambda: self.wght.plot_npa_weights(self.fig,self.canvas))).pack(side = tk.TOP,expand = tk.Y,fill = tk.BOTH)
else:
ttk.Label(self.weights_frame, text = '\n\nNo NPA weight data found').pack()
# Imaging frame
if self.spec._has_spectra and self.spec._has_geo:
ttk.Combobox(self.imaging_frame, textvariable = self.spec.lens,
values = list(self.spec.lenses.keys())).pack()
ttk.Checkbutton(self.imaging_frame,text = 'Exclude FIDA', variable = self.spec.fida_on_imaging,
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.imaging_frame,text = 'Exclude Full', variable = self.spec.full_on_imaging,
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.imaging_frame,text = 'Exclude Half', variable = self.spec.half_on_imaging,
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.imaging_frame,text = 'Exclude Third', variable = self.spec.third_on_imaging,
onvalue = False, offvalue = True).pack()
ttk.Checkbutton(self.imaging_frame,text = 'Exclude Halo', variable = self.spec.halo_on_imaging,
onvalue = False, offvalue = True).pack()
ttk.Label(self.imaging_frame, text = 'Wavelength Min (nm)').pack()
ttk.Entry(self.imaging_frame, textvariable = self.spec.wl_min_imaging, state = tk.NORMAL, width = 10).pack()
ttk.Label(self.imaging_frame, text = 'Wavelength Max (nm)').pack()
ttk.Entry(self.imaging_frame, textvariable = self.spec.wl_max_imaging, state = tk.NORMAL, width = 10).pack()
ttk.Button(self.imaging_frame, text = 'Reset Wavelength',\
command = (lambda: self.spec.reset_wave_imaging())).pack(side = tk.TOP)
ttk.Button(self.imaging_frame, text = 'Plot Image',\
command = (lambda: self.spec.plot_spec_image(self.fig, self.canvas))).pack(side = tk.TOP, expand = tk.Y, fill = tk.BOTH)
ttk.Button(self.imaging_frame, text = 'Plot Brems',\
command = (lambda: self.spec.plot_brems_image(self.fig, self.canvas))).pack(side = tk.TOP, expand = tk.Y, fill = tk.BOTH)
# ttk.Label(self.imaging_frame, text = 'Projection Distance (cm)').pack()
# ttk.Entry(self.imaging_frame, textvariable = self.spec.projection_dist, state = tk.NORMAL, width = 10).pack()
else:
ttk.Label(self.imaging_frame, text = '\n\nNo imaging data found').pack()
def read_nml(self, filename):
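    """Read the fidasim_inputs namelist from a file and repair stale result/geometry paths."""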
nml = f90nml.read(filename)['fidasim_inputs']
# Fall back to the namelist's own directory when the saved result_dir no longer exists
if not os.path.isdir(nml['result_dir']):
    nml['result_dir'] = os.path.dirname(filename)
# Likewise, look for the geometry file alongside the results if its saved path is stale
if not os.path.isfile(nml['geometry_file']):
    nml['geometry_file'] = os.path.join(nml['result_dir'], os.path.basename(nml['geometry_file']))
return nml
def load_namelist(self):
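    """Prompt for a namelist file, then rebuild the plotting objects from it."""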
self.namelistfile = askopenfilename(filetypes=[('Namelist Files','*.dat')])
self.nml = self.read_nml(self.namelistfile)
self.spec = Spectra(self.nml)
self.npa = NPA(self.nml)
self.neut = Neutrals(self.nml)
self.wght = Weights(self.nml)
if __name__ == '__main__':
root = tk.Tk()
Viewer(root)
root.mainloop()
|
lstagner/FIDASIM-GUI
|
fidaviewer.py
|
Python
|
mit
| 78,953
|
[
"Gaussian"
] |
358d633ffe40e70117e50b0e1d73c7146ec54bd28ad58828da40d73f9e8be089
|
import numpy as np
from Wavelets import WaveletGenerator
def syntheticSeismogram(v, rho, d, wavtyp='RICKER', wavf=[100], usingT=False, maxDepth=500, plotIt=False):
"""
syntheticSeismogram generates and displays a synthetic seismogram for
a simple 1-D layered model.
Inputs:
v : velocity of each layer (m/s)
rho : density of each layer (kg/m^3)
d : depth to the top of each layer (m)
The last layer is assumed to be a half-space
wavtyp : type of wavelet. The options are:
    Ricker : takes one frequency
    Gaussian : still in progress
    Ormsby : takes 4 frequencies
    Klauder : takes 2 frequencies
wavf : list of wavelet frequencies (Hz); how many are used depends on wavtyp
usingT : if True, each reflection amplitude includes the two-way
    transmission loss through all overlying interfaces
maxDepth : maximum depth of the model logs (m)
plotIt : if True, display the logs and the resulting seismogram
Lindsey Heagy
lheagy@eos.ubc.ca
Created: November 30, 2013
Modified: January 16, 2014
v = np.array([350, 1000, 2000]) # Velocity of each layer (m/s)
rho = np.array([1700, 2000, 2500]) # Density of each layer (kg/m^3)
d = np.array([0, 100, 200]) # Position of top of each layer (m)
"""
# Ensure that these are float numpy arrays
v, rho, d, wavf = np.array(v, dtype=float), np.array(rho, dtype=float), np.array(d, dtype=float), np.array(wavf, dtype=float)
usingT = np.array(usingT, dtype=bool)
nlayer = len(v) # number of layers
# Check that the number of layers match
assert len(rho) == nlayer, 'Number of layer densities must match number of layer velocities'
assert len(d) == nlayer, 'Number of layer tops must match the number of layer velocities'
# compute necessary parameters
Z = rho*v # acoustic impedance
R = np.diff(Z)/(Z[:-1] + Z[1:]) # reflection coefficients
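# i.e. R[j] = (Z[j+1] - Z[j]) / (Z[j+1] + Z[j]), the normal-incidence reflection coefficient at the j-th interface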
twttop = 2*np.diff(d)/v[:-1] # 2-way travel time within each layer
twttop = np.cumsum(twttop) # 2-way travel time from surface to top of each layer
# create model logs
resolution = 400 # How finely we discretize in depth
dpth = np.linspace(0,maxDepth,resolution) # create depth vector
nd = len(dpth)
# Initialize logs
rholog = np.zeros(nd) # density
vlog = np.zeros(nd) # velocity
zlog = np.zeros(nd) # acoustic impedance
rseries = np.zeros(nd) # reflectivity series
t = np.zeros(nd) # time
# Loop over layers to put information in logs
for i in range(nlayer):
di = (dpth >= d[i]) # current depth indicies
rholog[di] = rho[i] # density
vlog[di] = v[i] # velocity
zlog[di] = Z[i] # acoustic impedance
if i < nlayer-1:
di = np.logical_and(di, dpth < d[i+1])
ir = np.where(di)[0][-1]  # index of the deepest sample within this layer
if usingT:
    # attenuate by the two-way transmission loss through every overlying
    # interface (np.prod over an empty slice is 1.0, so the first
    # interface is left unscaled)
    rseries[ir] = R[i]*np.prod(1.0 - R[:i]**2)
else:
    rseries[ir] = R[i]
if i > 0:
t[di] = 2*(dpth[di] - d[i])/v[i] + twttop[i-1]
else:
t[di] = 2*dpth[di]/v[i]
# make wavelet
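# time axis: sampled 10x finer than the smallest step of the travel-time log,
# spanning +/- two periods of the lowest requested frequency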
dtwav = np.abs(np.min(np.diff(t)))/10.0
twav = np.arange(-2.0/np.min(wavf), 2.0/np.min(wavf), dtwav)
# Get source wavelet
wav = WaveletGenerator(wavtyp,wavf,twav)
# create synthetic seismogram
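# Convolutional model: the trace is the source wavelet convolved with the
# reflectivity series resampled onto a uniform time grid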
tref = np.arange(0,np.max(t),dtwav) + np.min(twav) # time discretization for reflectivity series
tr = t[np.abs(rseries) > 0]           # arrival times of the nonzero reflections
rflct = rseries[np.abs(rseries) > 0]  # matching amplitudes (keeps transmission losses)
rseriesconv = np.zeros(len(tref))
for i in range(len(tr)):
    index = np.abs(tref - tr[i]).argmin()
    rseriesconv[index] = rflct[i]
seis = np.convolve(wav,rseriesconv)
tseis = np.min(twav)+dtwav*np.arange(len(seis))
index = np.logical_and(tseis >= 0, tseis <= np.max(t))
tseis = tseis[index]
seis = seis[index]
if plotIt:
import matplotlib.pyplot as plt
plt.figure(1)
# Plot density
plt.subplot(151)
plt.plot(rholog, dpth, linewidth=2)
plt.title('Density')
plt.gca().invert_yaxis()  # depth increases downward
plt.grid()
# Plot velocity
plt.subplot(152)
plt.plot(vlog, dpth, linewidth=2)
plt.title('Velocity')
plt.gca().invert_yaxis()
plt.grid()
# Plot acoustic impedance
plt.subplot(153)
plt.plot(zlog, dpth, linewidth=2)
plt.title('Acoustic Impedance')
plt.gca().invert_yaxis()
plt.grid()
# Plot reflectivity series in depth
plt.subplot(154)
plt.hlines(dpth, np.zeros(nd), rseries, linewidth=2)
plt.title('Reflectivity Series')
plt.gca().invert_yaxis()
plt.grid()
# Plot the depth-time relation
plt.subplot(155)
plt.plot(t, dpth, linewidth=2)
plt.title('Depth-Time')
plt.gca().invert_yaxis()
plt.grid()
##
plt.figure(2)
# Plot the wavelet, the reflectivity series in time, and the synthetic trace
plt.subplot(131)
plt.plot(wav, twav, linewidth=2)
plt.title('Wavelet')
plt.gca().invert_yaxis()  # time increases downward
plt.grid()
plt.subplot(132)
plt.hlines(tref, np.zeros(len(rseriesconv)), rseriesconv, linewidth=2)
plt.title('Reflectivity Series')
plt.gca().invert_yaxis()
plt.grid()
plt.subplot(133)
plt.plot(seis, tseis, linewidth=2)
plt.title('Seismogram')
plt.gca().invert_yaxis()
plt.grid()
plt.show()
return dpth, t, seis, tseis
if __name__ == '__main__':
d = [0, 50, 100] # Position of top of each layer (m)
v = [350, 1000, 2000] # Velocity of each layer (m/s)
rho = [1700, 2000, 2500] # Density of each layer (kg/m^3)
syntheticSeismogram(v, rho, d, maxDepth=250, plotIt=True)
|
jaabell/Seismogram
|
Layers.py
|
Python
|
mit
| 6,511
|
[
"Gaussian"
] |
210aad2cbb9b61a9907c405e53b9aa8667d12c0ea3225c415001f67a8e448f53
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.