text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright 1998-2000 by Warren Lyford Delano of DeLano Scientific.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
# Generic vector and matrix routines for 3-Space
# Assembled for usage in PyMOL and Chemical Python
#
# Assumes row-major matrices and arrays
# [ [vector 1], [vector 2], [vector 3] ]
#
# Raises ValueError when given bad input
#
# TODO: documentation!
import math
import random
import copy
RSMALL4 = 0.0001  # vector lengths at or below this are treated as degenerate (see normalize, rotation_matrix)
#------------------------------------------------------------------------------
def get_null():
    """Return a fresh 3-component zero vector."""
    return [0.0] * 3
#------------------------------------------------------------------------------
def get_identity():
    """Return a fresh 3x3 identity matrix as a row-major list of rows."""
    ident = []
    for i in range(3):
        row = [0.0, 0.0, 0.0]
        row[i] = 1.0
        ident.append(row)
    return ident
#------------------------------------------------------------------------------
def distance_sq(v1, v2):
    """Return the squared Euclidean distance between two 3-vectors (no sqrt)."""
    return sum((b - a) * (b - a) for a, b in zip(v1, v2))
#------------------------------------------------------------------------------
def distance(v1, v2):
    """Return the Euclidean distance between two 3-vectors."""
    return math.sqrt(sum((b - a) * (b - a) for a, b in zip(v1, v2)))
#------------------------------------------------------------------------------
def length(v):
    """Return the Euclidean norm of a 3-vector."""
    return math.sqrt(sum(c * c for c in v))
#------------------------------------------------------------------------------
def random_displacement(v, radius):
    """Return v displaced in a random direction by a distance of
    random.random() * radius (i.e. at most `radius`).

    The direction is drawn by rejection sampling: candidate vectors with
    components in [-0.5, 0.5] are generated until one falls inside the
    sphere of radius 0.5.  A (vanishingly unlikely) near-zero candidate
    returns v unchanged.
    """
    while 1:
        offset = [random.random() - 0.5,
                  random.random() - 0.5,
                  random.random() - 0.5]
        offset_len = length(offset)
        if offset_len <= 0.5:
            break
    if offset_len > 0.00000000001:
        factor = random.random() * radius / offset_len
        return add(v, scale(offset, factor))
    else:
        return v
#------------------------------------------------------------------------------
def random_sphere(v, radius):
    """Return v plus a uniformly-random direction scaled by 2*radius.

    NOTE(review): the result lies at distance 2*radius from v -- the
    factor of two may be intentional (diameter?); confirm against callers.
    """
    while 1:
        vec = [random.random() - 0.5,
               random.random() - 0.5,
               random.random() - 0.5]
        vec_len = length(vec)
        if vec_len <= 0.5 and vec_len != 0.0:
            break
    return add(v, scale(vec, 2 * radius / vec_len))
#------------------------------------------------------------------------------
def random_vector():
    """Return a random vector of length <= 1.0, rejection-sampled from the
    unit ball (candidate components in [-0.5, 0.5], scaled by 2)."""
    while 1:
        vec = [random.random() - 0.5,
               random.random() - 0.5,
               random.random() - 0.5]
        if length(vec) <= 0.5:
            break
    return scale(vec, 2.0)
#------------------------------------------------------------------------------
def add(v1, v2):
    """Return the component-wise sum of two 3-vectors."""
    return [a + b for a, b in zip(v1, v2)]
#------------------------------------------------------------------------------
def average(v1, v2):
    """Return the midpoint of two 3-vectors."""
    return [(a + b) / 2.0 for a, b in zip(v1, v2)]
#------------------------------------------------------------------------------
def scale(v, factor):
    """Return the 3-vector v multiplied by the scalar factor."""
    return [c * factor for c in v]
#------------------------------------------------------------------------------
def negate(v):
    """Return the component-wise negation of a 3-vector."""
    x, y, z = v[0], v[1], v[2]
    return [-x, -y, -z]
#------------------------------------------------------------------------------
def sub(v1, v2):
    """Return the component-wise difference v1 - v2."""
    return [a - b for a, b in zip(v1, v2)]
#------------------------------------------------------------------------------
def dot_product(v1, v2):
    """Return the scalar (dot) product of two 3-vectors."""
    return sum(a * b for a, b in zip(v1, v2))
#------------------------------------------------------------------------------
def cross_product(v1, v2):
    """Return the right-handed cross product v1 x v2."""
    a0, a1, a2 = v1[0], v1[1], v1[2]
    b0, b1, b2 = v2[0], v2[1], v2[2]
    return [a1 * b2 - a2 * b1,
            a2 * b0 - a0 * b2,
            a0 * b1 - a1 * b0]
#------------------------------------------------------------------------------
def transform(m, v):
    """Apply the row-major 3x3 matrix m to the 3-vector v (m . v)."""
    return [row[0] * v[0] + row[1] * v[1] + row[2] * v[2] for row in m]
#------------------------------------------------------------------------------
def inverse_transform(m, v):
    """Apply the transpose of the 3x3 matrix m to v (m^T . v) -- the
    inverse transform when m is a pure rotation."""
    return [m[0][i] * v[0] + m[1][i] * v[1] + m[2][i] * v[2]
            for i in range(3)]
#------------------------------------------------------------------------------
def multiply(m1,m2): # HAVEN'T YET VERIFIED THAT THIS CONFORMS TO STANDARD DEFT
    # 3x3 matrix product.
    # NOTE(review): entry [i][j] of the result is sum_k m1[j][k]*m2[k][i],
    # i.e. this returns the TRANSPOSE of the conventional row-major
    # product m1.m2.  The original author's comment above flags the same
    # doubt -- confirm against callers before "fixing" it, since callers
    # may depend on the current orientation.
    return [[m1[0][0]*m2[0][0] + m1[0][1]*m2[1][0] + m1[0][2]*m2[2][0],
             m1[1][0]*m2[0][0] + m1[1][1]*m2[1][0] + m1[1][2]*m2[2][0],
             m1[2][0]*m2[0][0] + m1[2][1]*m2[1][0] + m1[2][2]*m2[2][0]],
            [m1[0][0]*m2[0][1] + m1[0][1]*m2[1][1] + m1[0][2]*m2[2][1],
             m1[1][0]*m2[0][1] + m1[1][1]*m2[1][1] + m1[1][2]*m2[2][1],
             m1[2][0]*m2[0][1] + m1[2][1]*m2[1][1] + m1[2][2]*m2[2][1]],
            [m1[0][0]*m2[0][2] + m1[0][1]*m2[1][2] + m1[0][2]*m2[2][2],
             m1[1][0]*m2[0][2] + m1[1][1]*m2[1][2] + m1[1][2]*m2[2][2],
             m1[2][0]*m2[0][2] + m1[2][1]*m2[1][2] + m1[2][2]*m2[2][2]]]
#------------------------------------------------------------------------------
def transpose(m1):
    """Return the transpose of a 3x3 row-major matrix.

    NOTE(review): an identical `transpose` is defined again further down
    in this module; the later definition is the one that wins at import
    time.  Both behave the same.
    """
    return [[m1[j][i] for j in range(3)] for i in range(3)]
#------------------------------------------------------------------------------
def get_system2(x, y):
    """Build an orthonormal right-handed frame [x, y, z] from vector x and
    a second vector y that fixes the x/y plane."""
    z_axis = normalize(cross_product(x, y))
    # Re-derive y from z and the *original* (unnormalized) x so the frame
    # is exactly orthogonal.
    y_axis = normalize(cross_product(z_axis, x))
    x_axis = normalize(x)
    return [x_axis, y_axis, z_axis]
#------------------------------------------------------------------------------
def scale_system(s, factor):
    """Return a new list with every 3-vector in s multiplied by factor."""
    return [[vec[0] * factor, vec[1] * factor, vec[2] * factor]
            for vec in s]
#------------------------------------------------------------------------------
def transpose(m):
    """Return the transpose of a 3x3 row-major matrix.

    NOTE(review): this duplicates the earlier `transpose` definition in
    this module; being later, this one is the effective definition.
    """
    return [list(column) for column in zip(*m)]
#------------------------------------------------------------------------------
def transform_about_point(m, v, p):
    """Apply matrix m to v using p as the pivot: shift v so p is the
    origin, transform, then shift back."""
    shifted = sub(v, p)
    return add(transform(m, shifted), p)
#------------------------------------------------------------------------------
def get_angle(v1, v2):
    """Return the angle in radians between 3-vectors v1 and v2.

    The vectors need not be unit length (the dot product is divided by
    the product of their norms).  The cosine is clamped to [-1, 1] before
    math.acos: floating-point rounding can push the ratio fractionally
    outside the legal domain for (anti)parallel vectors, which previously
    raised ValueError.  When either vector is near zero length the cosine
    falls back to 0.0, so pi/2 is returned (original behavior).
    """
    denom = (math.sqrt(((v1[0]*v1[0]) + (v1[1]*v1[1]) + (v1[2]*v1[2]))) *
             math.sqrt(((v2[0]*v2[0]) + (v2[1]*v2[1]) + (v2[2]*v2[2]))))
    if denom > 1e-10:
        result = ((v1[0]*v2[0]) + (v1[1]*v2[1]) + (v1[2]*v2[2])) / denom
        # Clamp against rounding error before taking the arc-cosine.
        if result > 1.0:
            result = 1.0
        elif result < -1.0:
            result = -1.0
    else:
        result = 0.0
    return math.acos(result)
#------------------------------------------------------------------------------
def get_angle_formed_by(p1, p2, p3):  # angle formed by three positions in space
    """Return the angle in radians at vertex p2 of the triangle p1-p2-p3,
    computed with the law of cosines from the pairwise distances.

    The cosine is clamped to [-1, 1] before math.acos: rounding error can
    push it fractionally outside the domain when the three points are
    nearly collinear, which previously raised ValueError.
    """
    # based on code submitted by Paul Sherwood
    r1 = distance(p1, p2)
    r2 = distance(p2, p3)
    r3 = distance(p1, p3)
    small = 1.0e-10
    if (r1 + r2 - r3) < small:
        # This seems to happen occasionally for 180 angles
        theta = math.pi
    else:
        cosine = (r1*r1 + r2*r2 - r3*r3) / (2.0 * r1*r2)
        # Clamp against floating-point rounding before acos.
        if cosine > 1.0:
            cosine = 1.0
        elif cosine < -1.0:
            cosine = -1.0
        theta = math.acos(cosine)
    return theta
#------------------------------------------------------------------------------
def project(v, n):
    """Return (v.n) * n -- the orthogonal projection of v onto n when n
    is a unit vector."""
    d = v[0] * n[0] + v[1] * n[1] + v[2] * n[2]
    return [d * n[0], d * n[1], d * n[2]]
#------------------------------------------------------------------------------
def remove_component(v, n):
    """Return v with its component along n removed (v - (v.n)*n); leaves
    v orthogonal to n when n is a unit vector."""
    d = v[0] * n[0] + v[1] * n[1] + v[2] * n[2]
    return [v[0] - d * n[0],
            v[1] - d * n[1],
            v[2] - d * n[2]]
#------------------------------------------------------------------------------
def normalize(v):
    """Return v scaled to unit length, or the null vector when v is too
    short (|v| <= 1e-4) to normalize safely."""
    norm = math.sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])
    if norm <= 0.0001:  # RSMALL4 threshold (inlined)
        return [0.0, 0.0, 0.0]
    return [v[0] / norm, v[1] / norm, v[2] / norm]
#------------------------------------------------------------------------------
def reverse(v):
    """Return the vector pointing opposite to v (same as negate)."""
    x, y, z = v[0], v[1], v[2]
    return [-x, -y, -z]
#------------------------------------------------------------------------------
def normalize_failsafe(v):
    """Return v scaled to unit length; falls back to the x-axis unit
    vector [1,0,0] when v is too short (|v| <= 1e-4) to normalize."""
    norm = math.sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])
    if norm <= 0.0001:  # RSMALL4 threshold (inlined)
        return [1.0, 0.0, 0.0]
    return [v[0] / norm, v[1] / norm, v[2] / norm]
#------------------------------------------------------------------------------
def rotation_matrix(angle, axis):
    """Return the row-major 3x3 matrix rotating by `angle` radians about
    `axis` (axis need not be unit length; it is normalized here).

    A degenerate (near-zero) axis yields the identity matrix.
    """
    ax, ay, az = axis[0], axis[1], axis[2]
    s = math.sin(angle)
    c = math.cos(angle)
    mag = math.sqrt(ax * ax + ay * ay + az * az)
    if abs(mag) < 0.0001:  # RSMALL4 threshold (inlined): degenerate axis
        return [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    ax, ay, az = ax / mag, ay / mag, az / mag
    # Standard axis-angle (Rodrigues) expansion.
    xx = ax * ax
    yy = ay * ay
    zz = az * az
    xy = ax * ay
    yz = ay * az
    zx = az * ax
    xs = ax * s
    ys = ay * s
    zs = az * s
    omc = 1.0 - c
    return [[omc * xx + c,  omc * xy - zs, omc * zx + ys],
            [omc * xy + zs, omc * yy + c,  omc * yz - xs],
            [omc * zx - ys, omc * yz + xs, omc * zz + c]]
#------------------------------------------------------------------------------
def transform_array(rot_mtx, vec_array):
    '''transform_array( matrix, vector_array ) -> vector_array

    Apply the 3x3 matrix rot_mtx to every vector in vec_array and return
    the transformed vectors as a list.  A list comprehension replaces the
    old map() call: under Python 3 map() returns an iterator, which broke
    fit() -- it indexes into this result.  Behavior under Python 2 is
    unchanged (map() returned a list there).
    '''
    return [transform(rot_mtx, vec) for vec in vec_array]
#------------------------------------------------------------------------------
def translate_array(trans_vec, vec_array):
    '''translate_array(trans_vec,vec_array) -> vec_array

    Add trans_vec to each vector in vec_array and return the translated
    vectors as a list.  (The old docstring mentioned a 'mult' parameter
    that never existed.)  A list comprehension replaces map() so the
    result stays indexable under Python 3 -- fit() indexes into it.
    '''
    return [add(trans_vec, vec) for vec in vec_array]
#------------------------------------------------------------------------------
def fit_apply(fit_result, vec_array):
    '''fit_apply(fit_result,vec_array) -> vec_array

    Apply a fit() result (t1, t2, rot_mtx, rmsd) to an array of vectors:
    each vector is shifted by -t2 (source center of mass), rotated by
    rot_mtx, then shifted by +t1 (target center of mass).  Returns a
    list; the old map() version returned an iterator under Python 3.
    (Also fixes the 'fir_result' typo in the old docstring.)
    '''
    t1 = fit_result[0]
    neg_t2 = negate(fit_result[1])
    rot_mtx = fit_result[2]
    return [add(t1, transform(rot_mtx, add(neg_t2, vec)))
            for vec in vec_array]
#------------------------------------------------------------------------------
def fit(target_array, source_array):
    '''fit(target_array, source_array) -> (t1, t2, rot_mtx, rmsd) [fit_result]

    Calculates the translation vectors and rotation matrix required
    to superimpose source_array onto target_array. Original arrays are
    not modified. NOTE: Currently assumes 3-dimensional coordinates
    t1,t2 are vectors from origin to centers of mass...

    The rotation is found by iteratively applying Jacobi-style plane
    rotations to the 3x3 correlation matrix until the off-diagonal
    asymmetry falls below `tol`; rot_mtx accumulates the same rotations.
    Raises ValueError on bad input or non-convergence.
    '''
    # Check dimensions of input arrays
    if len(target_array) != len(source_array):
        print ("Error: arrays must be of same length for RMS fitting.")
        raise ValueError
    if len(target_array[0]) != 3 or len(source_array[0]) != 3:
        print ("Error: arrays must be dimension 3 for RMS fitting.")
        raise ValueError
    nvec = len(target_array)
    ndim = 3
    maxiter = 200  # safety cap on the plane-rotation sweeps
    tol = 0.001    # convergence tolerance: |sig| <= tol*|gam|
    # Calculate translation vectors (center-of-mass).
    t1 = get_null()
    t2 = get_null()
    tvec1 = get_null()
    tvec2 = get_null()
    for i in range(nvec):
        for j in range(ndim):
            t1[j] = t1[j] + target_array[i][j]
            t2[j] = t2[j] + source_array[i][j]
    for j in range(ndim):
        t1[j] = t1[j] / nvec
        t2[j] = t2[j] / nvec
    # Calculate correlation matrix (of the mean-centered coordinates).
    corr_mtx = []
    for i in range(ndim):
        temp_vec = []
        for j in range(ndim):
            temp_vec.append(0.0)
        corr_mtx.append(temp_vec)
    # rot_mtx starts as the identity and accumulates the plane rotations.
    rot_mtx = []
    for i in range(ndim):
        temp_vec = []
        for j in range(ndim):
            temp_vec.append(0.0)
        rot_mtx.append(temp_vec)
    for i in range(ndim):
        rot_mtx[i][i] = 1.
    for i in range(nvec):
        for j in range(ndim):
            tvec1[j] = target_array[i][j] - t1[j]
            tvec2[j] = source_array[i][j] - t2[j]
        for j in range(ndim):
            for k in range(ndim):
                corr_mtx[j][k] = corr_mtx[j][k] + tvec2[j]*tvec1[k]
    # Main iteration scheme (hardwired for 3X3 matrix, but could be extended).
    iters = 0
    while (iters < maxiter):
        iters = iters + 1
        # Cycle through the three rotation planes: (iy, iz) is rotated,
        # ix is the fixed axis for this sweep.
        ix = (iters-1)%ndim
        iy = iters%ndim
        iz = (iters+1)%ndim
        sig = corr_mtx[iz][iy] - corr_mtx[iy][iz]  # antisymmetric part
        gam = corr_mtx[iy][iy] + corr_mtx[iz][iz]  # symmetric part
        sg = (sig**2 + gam**2)**0.5
        if sg != 0.0 and (abs(sig) > tol*abs(gam)):
            # Not yet converged in this plane: apply the rotation to both
            # the correlation matrix and the accumulated rotation matrix.
            sg = 1.0 / sg
            for i in range(ndim):
                bb = gam*corr_mtx[iy][i] + sig*corr_mtx[iz][i]
                cc = gam*corr_mtx[iz][i] - sig*corr_mtx[iy][i]
                corr_mtx[iy][i] = bb*sg
                corr_mtx[iz][i] = cc*sg
                bb = gam*rot_mtx[iy][i] + sig*rot_mtx[iz][i]
                cc = gam*rot_mtx[iz][i] - sig*rot_mtx[iy][i]
                rot_mtx[iy][i] = bb*sg
                rot_mtx[iz][i] = cc*sg
        else:
            # We have a converged rotation matrix. Calculate RMS deviation.
            vt1 = translate_array(negate(t1),target_array)
            vt2 = translate_array(negate(t2),source_array)
            vt3 = transform_array(rot_mtx,vt2)
            rmsd = 0.0
            for i in range(nvec):
                rmsd = rmsd + distance_sq(vt1[i], vt3[i])
            rmsd = math.sqrt(rmsd/nvec)
            return(t1, t2, rot_mtx, rmsd)
    # Too many iterations; something wrong.
    print ("Error: Too many iterations in RMS fit.")
    raise ValueError
| gratefulfrog/lib | python/chempy/cpv.py | Python | gpl-2.0 | 13,830 | [
"PyMOL"
] | d02b178a9732d4e3cde7b1e5c8066f1d58e0292f0c802c28fd1e563b571110d0 |
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
#
# Kovan's OGBot
# Copyright (c) 2007 by kovan
#
# *************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# *************************************************************************
#
import locale
import threading
import re
import os
import urllib
import types
import cPickle
import socket
import urllib2
import cookielib
import copy
import sys
import httplib
import warnings
import cookielib
import gzip
import math
from Queue import *
from time import strptime, time
time2 = time
from datetime import datetime, time
from cStringIO import *
from ClientForm import HTMLForm, ParseFile, ControlNotFoundError;
import keepalive
from CommonClasses import *
from Constants import *
from GameEntities import *
class WebAdapter(object):
"""Encapsulates the details of the communication with the ogame servers. This involves
HTTP protocol encapsulation and HTML parsing.
"""
class EventManager(BaseEventManager):
    """Forwards WebAdapter events (login, activity, connection errors) to
    the log and, via dispatch(), to any listeners such as the GUI."""
    def __init__(self, gui = None):
        super(WebAdapter.EventManager, self).__init__(gui)
    def connectionError(self, reason):
        # Log the failure and notify listeners of the "connectionError" event.
        self.logAndPrint("** CONNECTION ERROR: %s" % reason)
        self.dispatch("connectionError", reason)
    def loggedIn(self, username, session):
        # Log, then dispatch the message with a timestamp prefix.
        msg = 'Logged in with user %s.' % username
        self.logAndPrint(msg)
        msg = datetime.now().strftime("%X %x ") + msg
        self.dispatch("activityMsg", msg)
    def activityMsg(self, msg):
        # Generic activity message: log as-is, dispatch timestamped.
        self.logAndPrint(msg)
        msg = datetime.now().strftime("%X %x ") + msg
        self.dispatch("activityMsg", msg)
def __init__(self, config, checkThreadMsgsMethod, gui = None):
    """Set up the HTTP machinery: cookie jar, urllib2 openers (plain and
    keep-alive), optional proxy, and request headers.  No network traffic
    happens here; doLogin() performs the actual login."""
    self.server = ''
    self.lastFetchedUrl = ''       # used as the Referer header of the next request
    self.serverCharset = ''        # filled in by getMyPlanets()
    self.config = config
    self.gui = gui
    self.cookie = None
    self.checkThreadMsgsMethod = checkThreadMsgsMethod
    self._eventMgr = WebAdapter.EventManager(gui)
    self.serverTimeDelta = None    # server clock minus local clock; set by getMyPlanets()
    self._mutex = threading.RLock()
    self.scanThread = None
    self.myPlanetsResources = []
    self.currentTime = 0           # retry counter used by _fetchValidResponse
    self.webpage = self.config.webpage
    # NOTE: 'session' is a property -- this assignment also calls saveState().
    self.session = '000000000000'
    socket.setdefaulttimeout(20.0)
    self.cookie = cookielib.CookieJar()
    self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
    self.keepAliveOpener = urllib2.build_opener(keepalive.HTTPHandler())
    if self.config.proxy:
        proxyHandler = urllib2.ProxyHandler({"http":"http://"+self.config.proxy})
        self.opener.add_handler(proxyHandler)
        self.keepAliveOpener.add_handler(proxyHandler)
    headers = [('User-agent', self.config.userAgent), ('Keep-Alive', "300"), ('Accept-Encoding', "gzip,deflate")]
    self.opener.addheaders = headers
    self.keepAliveOpener.addheaders = headers
    self.serverLanguage = config.language
    #print 'self.serverLanguage', self.serverLanguage
def generateRegexps(self, translations):
    """(Re)build self.REGEXPS, the table of compiled patterns used to
    scrape the game's HTML pages.

    Called twice: before login with translations=None (only the patterns
    that need no localized strings are built), and again after login with
    the full translation table once the server's localized strings are
    known.
    """
    if translations:
        # Espionage report: header (date/coords/planet name) followed by
        # the metal/crystal/deuterium/energy resource table.
        reportTmp = r'<input type="checkbox" name="delmes[0-9]+.*?</th>.*?<th>(?P<date>[0-9].*?)</th>.*?<a href.*?>(?P<coords>\[[0-9:]+\]).*?</a>(?P<planetName>[^<]*?)</span>'
        reportTmp += r'.*?<tr><td>.*?</td><td>(?P<metal>[-0-9.]+)</td>\n'
        reportTmp += r'<td>.*?</td><td>(?P<crystal>[-0-9.]+)</td></tr>\n'
        reportTmp += r'<tr><td>.*?</td><td>(?P<deuterium>[-0-9.]+)</td>\n'
        reportTmp += r'<td>.*?</td><td>(?P<energy>[-0-9.]+)</td></tr>'
        # Template for one report section (fleet/defense/buildings/research);
        # %s is filled with the localized section title.
        reportTmp2 = r'<table width=[0-9]+><tr><td class=c colspan=4>%s(.*?)</table>'
        self.REGEXP_COORDS_STR = r"([1-9]{1,3}):([0-9]{1,3}):([0-9]{1,2})"
        self.REGEXP_SESSION_STR = r"[0-9a-zA-Z]{12}"
        self.REGEXPS = \
        {
            'logincheck': re.compile(r'<!-- GFAnalytics -->.*?src="(?P<url1>.*?)".*?<img.*?src="(?P<url2>.*?)"', re.DOTALL |re.I),
            'messages.php': re.compile(r'<input type="checkbox" name="delmes(?P<code>[0-9]+).*?(?=<input type="checkbox")', re.DOTALL |re.I),
            'fleetSendError':re.compile(r'<span class="error">(?P<error>.*?)</span>', re.I),
            'myPlanets':re.compile('<option value="/game/index\.php\?page=overview&session='+self.REGEXP_SESSION_STR+'&cp=([0-9]+)&mode=&gid=&messageziel=&re=0" (?:selected)?>(.*?)<.*?\['+self.REGEXP_COORDS_STR+'].*?</option>', re.I),
            'report':
            {
                'all' : re.compile(reportTmp, re.DOTALL|re.LOCALE|re.I),
                'fleet': re.compile(reportTmp2 % translations['fleets'], re.DOTALL|re.I),
                'defense': re.compile(reportTmp2 % translations['defense'], re.DOTALL|re.I),
                'buildings':re.compile(reportTmp2 % translations['buildings'], re.DOTALL|re.I),
                'research': re.compile(reportTmp2 % translations['research'], re.DOTALL|re.I),
                'details': re.compile(r"<td>(?P<type>.*?)</td><td>(?P<cuantity>[-0-9.]+)</td>", re.DOTALL|re.I)
            },
            'serverTime':re.compile(r"<th>.*?%s.*?</th>.*?<th.*?>(?P<date>.*?)</th>" % translations['serverTime'], re.DOTALL|re.I),
            'availableFleet':re.compile(r'name="maxship(?P<type>[0-9]{3})" value="(?P<cuantity>[-0-9.]+)"', re.I),
            'maxSlots':re.compile(r"%s\s*([0-9]+).*?([0-9]+)" % translations['maxFleets'], re.I),
            'researchLevels':re.compile(r">(?P<techName>[^<]+)</a></a>\s*\(.*?(?P<level>\d+)\s*\)", re.I|re.LOCALE),
            'fleetSendResult':re.compile(r"<tr.*?>\s*<th.*?>(?P<name>.*?)</th>\s*<th.*?>(?P<value>.*?)</th>", re.I),
            'charset':re.compile(r'content="text/html; charset=(.*?)"', re.I),
            # number, name, ownerStatus, owner, alliance
            'solarSystem':re.compile(r'<tr>.*?<a href="#" tabindex="\d+" >(\d+)</a>.*?<th width="130".*?>[\s]*(?:<a.*?>)?[\s]*([^&<]+).*?<th width="150">.*?<span class="(\w+?)">(.*?)</span>.*?<th width="80">.*?">[\s]*([\w .]*?) *<.*?</tr>'),
            'stats': re.compile(r"<th>(?:<font color='87CEEB'>)?([^<]+)(?:</font>)?</th>.*?<th>([0-9.]+)</th>",re.DOTALL),
            'resources':re.compile(r'<td align="center" class=\'header\' width="90"><font .*?>([-0-9.]+)</font></td>',re.I),
            'missions':re.compile(r'<th>[0-9]+</th>.*?<a title=".*?">([^<]+)</a>.*?<th> <a title="([^"]+)">[0-9]+</a></th>.*?<a href="index.php\?page=galaxy.*?>\[([0-9:]+)\]</a></th>.*?<th>([a-zA-Z]{3} [a-zA-Z]{3} [0-9]+ [:0-9]+)</th>.*?<th><a href="index.php\?page=galaxy.*?>\[([0-9:]+)\]</a>.*?<th>([a-zA-Z]{3} [a-zA-Z]{3} [0-9]+ [:0-9]+)</th>', re.DOTALL|re.I),
            'techtree':re.compile(r'<table width="100%" border=0 cellspacing=0 cellpadding=0><tr><td align=left>.*?session=(?P<session>[0-9a-zA-Z]{12}).*?gid=(?P<id>\d+)">(?P<namd>.*?)</a>', re.DOTALL |re.I)
        }
    else:
        # Pre-login: only the patterns that need no localized strings.
        self.REGEXP_SESSION_STR = r"[0-9a-zA-Z]{12}"
        self.REGEXPS = \
        {
            'logincheck': re.compile(r'<!-- GFAnalytics -->.*?src="(?P<url1>.*?)".*?<img.*?src="(?P<url2>.*?)"', re.DOTALL |re.I),
            'techtree':re.compile(r'<table width="100%" border=0 cellspacing=0 cellpadding=0><tr><td align=left>.*?session=(?P<session>[0-9a-zA-Z]{12}).*?gid=(?P<id>\d+)">(?P<namd>.*?)</a>', re.DOTALL |re.I)
        }
def setSession(self, value):
    # Persist bot state every time the session id changes, so a restart
    # can resume with the same session.
    self._session = value
    self.saveState()
def getSession(self):
    return self._session
# 'session' is a property: every assignment to self.session triggers saveState().
session = property(getSession, setSession)
def serverTime(self):
    """Return the current time according to the game server's clock
    (local time plus the cached offset).

    NOTE(review): serverTimeDelta is None until getMyPlanets() has run;
    calling this earlier raises TypeError.
    """
    return self.serverTimeDelta + datetime.now()
def _fetchPhp(self, php, **params):
    """Fetch a game page: build the URL for `php` with `params` (the
    current session id is always appended) and run the request through
    _fetchValidResponse()."""
    params['session'] = self.session
    query = urllib.urlencode(params)
    target = "http://%s/game/%s?%s" % (self.webpage, php, query)
    return self._fetchValidResponse(target)
def _fetchForm(self, form):
    # Submit a ClientForm form and validate the response like any other fetch.
    return self._fetchValidResponse(form.click())
def _fetchValidResponse(self, request, skipValidityCheck = False, changeLastUrl = True):
    """Fetch a URL (or urllib2.Request), retrying until the response looks
    valid, and return it as a seekable StringIO.

    Handles gzip-compressed responses, sets the Referer header from the
    last fetched URL, and recognizes several game error pages:
    errorcode=8 / empty page -> retry; db-problem or logout strings ->
    BotReset; errorcode=2 -> BotFatalError (bad credentials).

    NOTE(review): the mutex acquired at the top is only released on the
    normal exit path -- any raised exception (BotReset, BotError, ...)
    leaves it held.  RLock is re-entrant per-thread, which masks this for
    the owning thread, but it is a lock leak across threads.
    """
    self.currentTime = 0
    self._mutex.acquire()
    if isinstance(request, str):
        request = urllib2.Request(request)
    if self.lastFetchedUrl:
        request.add_header('Referer',self.lastFetchedUrl)
    valid = False
    while not valid:
        self.currentTime = self.currentTime + 1
        if self.currentTime > 4:
            # NOTE(review): this exception is instantiated but never
            # raised -- the loop just breaks and the last (possibly
            # invalid) response is returned.  Probably intended:
            # raise BotFatalError("Maybe in dead loop now!!")
            BotFatalError("Maybe in dead loop now!!")
            break
        valid = True
        try:
            response = self.opener.open(request)
            tmpUrl = response.geturl()
            if __debug__:
                print >>sys.stderr, " Fetched " + tmpUrl
            if changeLastUrl:
                self.lastFetchedUrl = tmpUrl
            # Buffer the body so callers can read()/seek() repeatedly.
            cachedResponse = StringIO()
            if response.info().has_key('Content-Encoding') and 'gzip' in response.info()['Content-Encoding'] :
                zbuf = StringIO(response.read())
                cachedResponse = StringIO(gzip.GzipFile(fileobj=zbuf,mode='rb').read())
            else :
                cachedResponse = StringIO(response.read())
            p = cachedResponse.getvalue()
            cachedResponse.seek(0)
            if skipValidityCheck:
                break
            elif not p or 'errorcode=8' in self.lastFetchedUrl:
                valid = False
            elif self.translations['dbProblem'] in p or self.translations['untilNextTime'] in p or "Grund 5" in p:
                valid = False
                print 'Raise BotReset Error'
                raise BotReset()
            elif 'errorcode=2' in self.lastFetchedUrl:
                raise BotFatalError("Invalid username and/or password.")
        except urllib2.HTTPError, e:
            if e.code == 302: # happens once in a while when user and bot are playing simultaneusly.
                raise BotError()
            else: raise e
        except (urllib2.URLError, httplib.IncompleteRead, httplib.BadStatusLine), e:
            # Transient network failure: report it and retry.
            self._eventMgr.connectionError(e)
            valid = False
        except Exception, e:
            if "timed out" in str(e):
                self._eventMgr.connectionError("timed out")
                valid = False
            else: raise e
        if not valid:
            mySleep(5)
    #print 'cookie', str(self.cookie)
    cookie = re.findall(r"<Cookie\s([^ ]*)", str(self.cookie))
    self.gui.setCookie(cookie)
    self._mutex.release()
    return cachedResponse
def doLogin(self):
    """Log in to the game server and initialize the translation tables
    and scraping regexps.

    Steps: (1) load the static translations for the configured language,
    (2) fetch the login URL and extract the 12-character session id,
    (3) hit the overview page and the two GFAnalytics tracking URLs the
    server checks, (4) scrape the techtree page to learn the localized
    names of buildings/research/ships/defense, then rebuild the regexps
    with the full translation table.
    """
    # Avoid logging in at exactly 03:00 server time.
    if self.serverTimeDelta and self.serverTime().hour == 3 and self.serverTime().minute == 0:
        mySleep(60)
    allTranslations = Translations()
    #First init translations from tranlate file
    try:
        self.translations = allTranslations[self.serverLanguage]
    #First int re
    except KeyError:
        raise BotFatalError("Server language (%s) not supported by bot" % self.serverLanguage)
    # Pre-login regexps (no localized strings available yet).
    self.generateRegexps(translations=None)
    self.server = "http://"+self.webpage+"/game/reg/login2.php?"+"v=2"+"&login="+self.config.username+"&pass="+self.config.password
    #print self.server
    page = self._fetchValidResponse(self.server, True).read()
    try:
        self.session = re.findall(self.REGEXP_SESSION_STR, page)[0]
    except IndexError:
        raise BotFatalError(page)
    #print self.session
    page = self._fetchPhp('index.php', page='overview', lgn=1).read()
    url1, url2 = self.REGEXPS['logincheck'].findall(page)[0]
    #print url1
    #print url2
    #somehow they are checking this!
    self._fetchValidResponse(url1, skipValidityCheck = True, changeLastUrl = False)
    self._fetchValidResponse(url2, skipValidityCheck = True, changeLastUrl = False)
    #now from techtree to get auto trans for building/research/ship/defense name
    page = self._fetchPhp('index.php', page='techtree').read()
    for session,gid,local_name in self.REGEXPS['techtree'].findall(page):
        self.translations[INGAME_TYPES_BY_CODE[int(gid)].name] = local_name
    self.gui.translations = self.translations
    # Reverse map: localized text -> canonical name.
    self.translationsByLocalText = dict([ (value, key) for key, value in self.translations.items() ])
    self.generateRegexps(self.translations)
    self._eventMgr.loggedIn(self.config.username, self.session)
    mySleep(5)
    self.saveState()
def getMyPlanets(self):
    """Scrape the overview page and return the player's planets as a list
    of OwnPlanet objects (a second entry at the same coordinates is
    flagged as a moon).

    Side effects: refreshes self.serverTimeDelta (server clock offset),
    self.serverCharset and self.myPlanets.

    Raises BotFatalError when no planets can be parsed from the page.
    """
    page = self._fetchPhp('index.php', page='overview').read()
    myPlanets = []
    for code, name, galaxy, ss, pos in self.REGEXPS['myPlanets'].findall(page):
        coords = Coords(galaxy, ss, pos)
        for planet in myPlanets:
            if planet.coords == coords: # we found a moon for this planet
                coords.coordsType = Coords.Types.moon
        planet = OwnPlanet(coords, name.strip(), code)
        myPlanets.append(planet)
    if myPlanets:
        strTime = self.REGEXPS['serverTime'].findall(page)[0]
        serverTime = parseTime(strTime)
        self.serverTimeDelta = serverTime - datetime.now()
        self.serverCharset = self.REGEXPS['charset'].findall(page)[0]
        self.myPlanets = myPlanets
    else:
        # BUG FIX: the exception was previously instantiated but never
        # raised, so an empty planet list was returned silently.
        raise BotFatalError("No Planets are found in page overview")
    return myPlanets
def readMessagePage(self):
    # Fetch the in-game messages page (dsp=1 expands message contents).
    # Returns the raw response object, not its text.
    page = self._fetchPhp('index.php', page='messages',dsp='1')
    return page
def getEspionageReports(self):
page = self._fetchPhp('index.php', page='messages',dsp='1').read()
#print page
rawMessages = {}
for match in self.REGEXPS['messages.php'].finditer(page):
rawMessages[match.group('code')] = match.group(0)
reports = []
for code, rawMessage in rawMessages.items():
if 'class="espionagereport"' not in rawMessage:
#Some other messages
#print 'espionagereport not in rawMessage'
continue
#print rawMessage
m = self.REGEXPS['report']['all'].search(rawMessage)
if m == None: #theorically should never happen
warnings.warn("Error parsing espionage report.")
continue
planetName = m.group('planetName')
coords = Coords(m.group('coords'))
date = parseTime(m.group('date'), "%m-%d %H:%M:%S")
#print planetName
#print coords
#print date
resources = Resources(m.group('metal').replace('.', ''), m.group('crystal').replace('.', ''), m.group('deuterium').replace('.', ''))
report = EspionageReport(coords, planetName, date, resources, code)
for i in "fleet", "defense", "buildings", "research":
dict = None
match = self.REGEXPS['report'][i].search(rawMessage)
if match:
dict, text = {}, match.group(1)
for fullName, cuantity in self.REGEXPS['report']['details'].findall(text):
try :
dict[self.translationsByLocalText[fullName.strip()]] = int(cuantity.replace('.', ''))
except KeyError, e1 :
raise BotError("Unknown espionnage report string, check your translation file : %s", e1)
setattr(report, i, dict)
report.rawHtml = rawMessage
reports.append(report)
return reports
def buildBuildings(self, building, planet):
    # Request construction of `building` on `planet` (fire-and-forget:
    # the response page is not inspected).
    self._fetchPhp('index.php', page='b_building', bau=building.code, cp=planet.code)
def launchMission(self, mission, abortIfNotEnough = True, slotsToReserve = 0):
    """Send `mission` through the game's 4-step fleet dispatch workflow.

    Steps: (1) fleet page: pick ships, check free slots and compute the
    deuterium consumption; (2) destination/speed form; (3) mission order
    and cargo resources; (4) result page: verify success and copy the
    server-reported times, distance, consumption and actually-sent fleet
    back onto `mission`.

    The whole flow runs inside a `while True` loop so that any transient
    page (e.g. 'fleet could not be sent') restarts from step 1 via
    `continue`.

    Raises NoFreeSlotsError, NotEnoughShipsError, NotEnoughDeutError or
    FleetSendError on the corresponding failures.

    NOTE(review): every `break` that directly follows a `raise` below is
    unreachable dead code.
    """
    while True:
        # assure cuantities are integers
        for shipType, cuantity in mission.fleet.items():
            mission.fleet[shipType] = int(cuantity)
        # 1st step: select fleet
        page = self._fetchPhp('index.php', page='flotten1', mode='Flotte', cp=mission.sourcePlanet.code)
        pageText = page.read()
        page.seek(0)
        resource = self.getMyCurrentPlanetResources(pageText)
        self.updateMyPlanetsResources(resource, mission.sourcePlanet)
        mySleep(1);
        if self.getFreeSlots(pageText) <= int(slotsToReserve):
            print 'Raise NoFreeSlotsError in line 399'
            raise NoFreeSlotsError(self.getFlyingMissions(pageText))
            break
        availableFleet = self.getAvailableFleet(None, pageText)
        # The fleet-selection form is the last form on the page.
        form = ParseFile(page, self.lastFetchedUrl, backwards_compat=False)[-1]
        for shipType, requested in mission.fleet.items():
            available = availableFleet.get(shipType, 0)
            if available == 0 or (abortIfNotEnough and available < requested):
                raise NotEnoughShipsError(availableFleet, {shipType:requested},self.getFlyingMissions(pageText), available)
                break
            shipCode = 'ship'+str(INGAME_TYPES_BY_NAME[shipType].code)
            form[shipCode] = str(requested)
        # calculate mission consumption to check if there is enough deut on the planet
        mission.consumption = 0
        # Fleet speed is limited by its slowest ship.
        maxspeed = 9999999999
        distance = mission.sourcePlanet.coords.distanceTo(mission.targetPlanet.coords)
        for shipType, requested in mission.fleet.items() :
            if INGAME_TYPES_BY_NAME[shipType].speed < maxspeed:
                maxspeed = INGAME_TYPES_BY_NAME[shipType].speed
        flightTime = mission.sourcePlanet.coords.flightTimeTo(mission.targetPlanet.coords, maxspeed, mission.speedPercentage)
        # Per-ship consumption formula mirroring the game's own calculation.
        for shipType, requested in mission.fleet.items() :
            basicConsumption = INGAME_TYPES_BY_NAME[shipType].consumption * requested
            spd = 35000.0 / (flightTime.seconds*int(self.config.gameSpeed) - 10) * math.sqrt(distance * 10.0 / float(INGAME_TYPES_BY_NAME[shipType].speed))
            mission.consumption += (basicConsumption *distance/35000.0* ((spd / 10.0) + 1) * ((spd / 10.0) + 1))
        mission.consumption = round(mission.consumption+1)
        resources = self.getMyCurrentPlanetResources(pageText)
        # resources[2] is the deuterium stock on the source planet.
        if mission.consumption > resources[2] :
            raise NotEnoughDeutError(mission.consumption, resources[2])
            break
        # 2nd step: select destination and speed
        page = self._fetchForm(form)
        forms = ParseFile(page, self.lastFetchedUrl, backwards_compat=False)
        if not forms or 'flotten3' not in forms[0].action:
            continue
        form = forms[0]
        destCoords = mission.targetPlanet.coords
        form['galaxy'] = str(destCoords.galaxy)
        form['system'] = str(destCoords.solarSystem)
        form['planet'] = str(destCoords.planet)
        form['planettype']= [str(destCoords.coordsType)]
        form['speed'] = [str(mission.speedPercentage / 10)]
        # 3rd step: select mission and resources to carry
        page = self._fetchForm(form)
        form = None
        pf = ParseFile(page, self.lastFetchedUrl, backwards_compat=False)
        if len(pf) != 0 :
            form = pf[0]
        else :
            continue
        try:
            form['order'] = [str(mission.missionType)]
        except ControlNotFoundError:
            continue
        except:
            continue
        resources = mission.resources
        form['resource1'] = str(resources.metal)
        form['resource2'] = str(resources.crystal)
        form['resource3'] = str(resources.deuterium)
        # 4th and final step: check result
        page = self._fetchForm(form).read()
        if self.translations['fleetCouldNotBeSent'] in page:
            continue
        errors = self.REGEXPS['fleetSendError'].findall(page)
        if len(errors) > 0 or 'class="success"' not in page:
            errors = str(errors)
            if self.translations['fleetLimitReached'] in errors:
                print 'Raise NoFreeSlotsError in line 458'
                raise NoFreeSlotsError(self.getFlyingMissions())
                break
            elif self.translations['noShipSelected'] in errors:
                raise NotEnoughShipsError(availableFleet, mission.fleet)
                break
            else:
                raise FleetSendError(errors)
            continue
        # Parse the name/value table on the confirmation page.
        resultPage = {}
        for type, value in self.REGEXPS['fleetSendResult'].findall(page):
            resultPage[type] = value
        # fill remaining mission fields
        arrivalTime = parseTime(resultPage[self.translations['arrivalTime']])
        returnTime = parseTime(resultPage[self.translations['returnTime']])
        mission.flightTime = returnTime - arrivalTime
        mission.launchTime = arrivalTime - mission.flightTime
        mission.distance = int(resultPage[self.translations['distance']].replace('.', ''))
        mission.consumption = int(resultPage[self.translations['consumption']].replace('.', ''))
        # Cross-check the fleet the server reports as actually sent.
        sentFleet = {}
        for fullName, value in resultPage.items():
            name = self.translationsByLocalText.get(fullName)
            if name is None:
                continue
            if name in INGAME_TYPES_BY_NAME.keys():
                sentFleet[name] = int(value.replace('.', ''))
        if mission.fleet != sentFleet:
            warnings.warn("Not all requested fleet was sent. Requested: %s. Sent: %s" % (mission.fleet, sentFleet))
        mission.fleet = sentFleet
        mySleep(1);
        page = self._fetchPhp('index.php', page='flotten1', mode='Flotte', cp=mission.sourcePlanet.code)
        break
def getFreeSlots(self, alreadyFetchedPage = None):
page = alreadyFetchedPage
if not page:
page = self._fetchPhp('index.php', page='flotten1', mode='Flotte').read()
slots = []
slots = self.REGEXPS['maxSlots'].findall(page)
if slots:
slotsF, slotsM = slots[0]
maxFleets = int(slotsM)
usedSlots = int(slotsF)
print 'usedSlots,maxFleets',usedSlots, maxFleets
return maxFleets - usedSlots
else:
BotFatalError("Can not get slots info on page")
return 0
def getFlyingMissions(self, alreadyFetchedPage = None):
    """Parse the fleet page and return the list of currently flying missions.

    @param alreadyFetchedPage: optional pre-fetched fleet page HTML;
        when None the fleet page is fetched fresh.
    @return: list of Mission objects with type, source, destination,
        fleet composition, launchTime and flightTime filled in.
    @raise BotError: when a fleet order or ship name is missing from the
        translation file.
    """
    page = alreadyFetchedPage
    if not page:
        page = self._fetchPhp('index.php', page='flotten1', mode='Flotte').read()
    missions = []
    for mission in self.REGEXPS['missions'].findall(page):
        try:
            missionType = self.translationsByLocalText[mission[0]]
        except KeyError as e1:
            raise BotError("Unknown Fleet order, check your translation file : %s", e1)
        if missionType == 'attack':
            missionType = Mission.Types.attack
        elif missionType == 'attackgroup':
            missionType = Mission.Types.attackgroup
        elif missionType == 'transport':
            missionType = Mission.Types.transport
        elif missionType == 'deploy':
            missionType = Mission.Types.deploy
        elif missionType == 'deployally':
            # BUG FIX: the original used '==' (a no-op comparison) instead
            # of '=', so ally deployments kept the raw translated string
            # instead of the Mission.Types enum value.
            missionType = Mission.Types.deployally
        elif missionType == 'spy':
            missionType = Mission.Types.spy
        elif missionType == 'recycle':
            missionType = Mission.Types.recycle
        elif missionType == 'destroy':
            missionType = Mission.Types.destroy
        elif missionType == 'expedition':
            missionType = Mission.Types.expedition
        else:
            missionType = Mission.Types.unknown
        source = Planet(Coords(mission[2]))
        destination = Planet(Coords(mission[4]))
        fleet = {}
        # mission[1] is a newline-separated "localShipName:count" list
        for ship in mission[1].split('\n'):
            if ship != '':
                shiptype, quantity = ship.split(':')
                try:
                    fleet[self.translationsByLocalText[shiptype]] = int(quantity)
                except KeyError as e1:
                    raise BotError("Unknown ship name, check your translation file : %s", e1)
        m = Mission(missionType, source, destination, fleet)
        # The slowest ship in the fleet bounds the mission speed.
        maxspeed = 9999999999
        for shipType, requested in m.fleet.items():
            if INGAME_TYPES_BY_NAME[shipType].speed < maxspeed:
                maxspeed = INGAME_TYPES_BY_NAME[shipType].speed
        m.launchTime = parseTime(mission[3])
        m.flightTime = source.coords.flightTimeTo(destination.coords, maxspeed)
        missions.append(m)
    return missions
def getTimeForFreeSlot(self, alreadyFetchedPage = None):
    """Return the datetime parsed from the fleet page's flight-time table,
    or None when no times are present.

    @param alreadyFetchedPage: optional pre-fetched fleet page HTML;
        when None the fleet page is fetched fresh.
    """
    page = alreadyFetchedPage
    if not page:
        page = self._fetchPhp('index.php', page='flotten1', mode='Flotte').read()
    # e.g. matches "Mon Aug 7 21:08:52" inside <th> cells
    times = re.findall(r"<th>([a-zA-Z]{3} [a-zA-Z]{3} [0-9]+ [:0-9]+)</th>", page)
    time = None
    if len(times)==0:
        time = None
    else:
        # NOTE(review): indexes the SECOND match -- raises IndexError when
        # exactly one time is on the page; presumably times[1] skips a
        # header cell. Confirm against the actual page layout.
        time = parseTime(times[1])
    return time
def getAvailableFleet(self, planet, alreadyFetchedPage = None):
    """Return the fleet stationed at *planet* as a {shipName: count} dict.

    @param planet: planet whose fleet page should be read (``planet.code``).
    @param alreadyFetchedPage: optional pre-fetched fleet page HTML.
    """
    page = alreadyFetchedPage
    if not page:
        page = self._fetchPhp('index.php', page='flotten1', mode='Flotte', cp=planet.code).read()
    fleet = {}
    for shipCode, amount in self.REGEXPS['availableFleet'].findall(page):
        shipName = INGAME_TYPES_BY_CODE[int(shipCode)].name
        # amounts use '.' as thousands separator
        fleet[shipName] = int(amount.replace('.', ''))
    return fleet
def deleteMessages(self, messages):
    """Mark the given messages for deletion on the messages page and submit
    the form once all checkboxes are set.

    @param messages: iterable of message objects exposing a ``code`` attribute.
    """
    page = self._fetchPhp('index.php', page='messages')
    form = ParseFile(page, self.lastFetchedUrl, backwards_compat=False)[0]
    for message in messages:
        checkBoxName = "delmes" + message.code
        try:
            form[checkBoxName] = [None] # actually this marks the checbox as checked (!!)
            form["deletemessages"] = ["deletemarked"]
        except ControlNotFoundError:
            # Checkbox absent (message already gone?) -- just log and go on.
            if __debug__:
                print >> sys.stderr, "Could not delete message " + str(message)
    # Submit once, after every available checkbox has been marked.
    self._fetchForm(form)
def updateMyPlanetsResources(self, resource, planet):
    """Update the cached resource row for *planet* and refresh the GUI table.

    The cache is a flat list with 5 fields per planet:
    [planetRepr, res0, res1, res2, updatedFlag, ...] (see
    getAndUpdateMyPlanetsResources, which seeds it).

    @param resource: sequence of at least 3 resource amounts
        (presumably metal, crystal, deuterium -- confirm against
        getMyCurrentPlanetResources).
    @param planet: planet whose row (matched by coords substring) to update.
    """
    if len(resource) < 3:
        print 'Error when get resource'
        return
    resources = self.myPlanetsResources
    inc = 5
    for p in xrange(len(resources)/inc):
        # Row matches when the stored repr contains the planet's coords.
        if str(resources[inc*p]).find(str(planet.coords)) != -1:
            resources[inc*p+1] = resource[0]
            resources[inc*p+2] = resource[1]
            resources[inc*p+3] = resource[2]
            resources[inc*p+4] = 'updated'
            break
    self.myPlanetsResources = resources
    self.gui.myPlanetsResTableUpdate(self.myPlanetsResources)
def resetMyPlanetsResFlag(self):
    """Mark every cached planet-resource row as stale ('no').

    Rows are 5 fields wide; field 4 is the updated-flag.
    """
    records = self.myPlanetsResources
    step = 5
    for row in range(len(records) // step):
        records[row * step + 4] = 'no'
    self.myPlanetsResources = records
def getAndUpdateMyPlanetsResources(self):
    """Refresh every cached planet-resource row that is still flagged 'no'
    (stale) by visiting the planet's overview page, then push the table
    to the GUI.
    """
    if not self.myPlanetsResources:
        # First run: seed one 5-field row per planet:
        # [planetRepr, res0, res1, res2, updatedFlag]
        for planet in self.myPlanets:
            self.myPlanetsResources.append(str(planet))
            self.myPlanetsResources.append(0)
            self.myPlanetsResources.append(0)
            self.myPlanetsResources.append(0)
            self.myPlanetsResources.append('no')
    resources = self.myPlanetsResources
    inc = 5
    for p in xrange(len(resources)/inc):
        if resources[inc*p+4] == 'no':
            for planet in self.myPlanets:
                if str(resources[inc*p]).find(str(planet.coords)) != -1:
                    page = self.goToPlanet(planet)
                    new_resources = self.getMyCurrentPlanetResources(page)
                    resources[inc*p+1] = new_resources[0]
                    resources[inc*p+2] = new_resources[1]
                    resources[inc*p+3] = new_resources[2]
                else:
                    # NOTE(review): this re-flags the row 'no' whenever ANY
                    # planet's coords fail to match, and the flag is never
                    # set to 'updated' on success here -- looks suspicious;
                    # confirm intended flag handling against
                    # updateMyPlanetsResources().
                    resources[inc*p+4] = 'no'
            # throttle requests between rows
            mySleep(1)
    self.myPlanetsResources = resources
    self.gui.myPlanetsResTableUpdate(self.myPlanetsResources)
def getMyCurrentPlanetResources(self, webpage):
    """Parse the resource amounts from a planet page.

    @param webpage: HTML of the planet page.
    @return: list of integer amounts, '.' thousands separators stripped.
    """
    raw_amounts = self.REGEXPS['resources'].findall(webpage)
    return [int(amount.replace('.', '')) for amount in raw_amounts]
def getResearchLevels(self):
    """Fetch the research page and return the bot's research levels.

    Iterates over the player's planets until one yields a page containing
    at least one known drive technology (research is the same from every
    planet, so the first usable page wins).

    @return: dict mapping internal research names to integer levels.
    @raise BotError: when a research name has no translation entry.
    @raise BotFatalError: when no drive technology is researched at all.
    """
    for planet in self.myPlanets:
        page = self._fetchPhp('index.php', page='buildings', mode='Forschung',cp=planet.code).read()
        page = page.replace("\n", "")
        levels = {}
        for fullName, level in self.REGEXPS['researchLevels'].findall(page):
            try:
                levels[self.translationsByLocalText[fullName]] = int(level)
            except KeyError, e1 :
                # NOTE(review): "%s" is passed as a second argument to
                # BotError rather than interpolated -- presumably BotError
                # formats its args; confirm against its definition.
                raise BotError("Unknown research name, check your translation file : %s", e1)
        # The bot needs at least one drive technology to plan flights.
        if 'impulseDrive' in levels or 'combustionDrive' in levels:
            return levels
    raise BotFatalError("Not enough technologies researched to run the bot")
def goToPlanet(self, planet):
    """Open the overview page of *planet* (making it the active planet
    server-side) and return the page HTML."""
    response = self._fetchPhp('index.php', page='overview', cp=planet.code)
    return response.read()
def selectDeuteriumSourcePlanet(self):
    """Visit the player's planets until one with more than 200 units of
    deuterium is found; the visit itself makes it the active planet.

    @raise BotFatalError: when no planet has enough deuterium.
    """
    find = False
    for planet in self.myPlanets:
        page = self.goToPlanet(planet)
        resources = self.REGEXPS['resources'].findall(page)
        # resources[2] is the deuterium amount (per the error below);
        # '.' is the thousands separator.
        if int(str(resources[2]).replace('.', ''))> 200:
            print 'Source planet selected:', planet
            find = True
            break
    if find == False:
        raise BotFatalError("Probably there is not enough deuterium on all planets.")
def getSolarSystems(self, solarSystems): # solarsytems is an iterable of tuples
    """Fetch the galaxy view for each (galaxy, solarSystem) tuple and
    collect the inhabited planets found there.

    @param solarSystems: iterable of (galaxy, solarSystem) tuples.
    @return: list of (galaxy, solarSystem, [EnemyPlanet, ...]) tuples, one
        entry per system that contained at least one owned planet.
    @raise BotFatalError: when the server reports a deuterium shortage
        while paging through the galaxy.
    """
    found = []
    print solarSystems
    for searchGalaxy, searchSolarSystem in solarSystems:
        params = {'no_header':'1', 'session':self.session, 'galaxy':searchGalaxy, 'system':searchSolarSystem}
        url = "http://%s/game/index.php?page=galaxy&%s" % (self.webpage, urllib.urlencode(params))
        if __debug__:
            print >>sys.stderr, " Fetched " + url
        try:
            response = self.opener.open(url)
            if 'error' in response.geturl():
                continue
        except:
            # network hiccup: silently skip this system
            continue
        page = None
        try:
            # the server may serve the galaxy page gzip-compressed
            if response.info().has_key('Content-Encoding') and 'gzip' in response.info()['Content-Encoding'] :
                zbuf = StringIO(response.read())
                page = gzip.GzipFile(fileobj=zbuf,mode='rb').read()
            else :
                page = response.read()
        except:
            continue
        page = page.replace("\n", "")
        # Viewing other systems costs deuterium; abort on shortage.
        if str(page).find(str('<span class="error">')) != -1 and str(page).find(str(self.translations['noDeuterium'])) != -1:
            raise BotFatalError("Probably there is not enough deuterium planet.")
        else:
            # Coordinates echoed back by the page's own form fields.
            galaxy = re.findall('input type="text" name="galaxy" value="(\d+)"', page)[0]
            solarSystem = re.findall('input type="text" name="system" value="(\d+)"', page)[0]
            foundPlanets = []
            for number, name, ownerStatus, owner, alliance in self.REGEXPS['solarSystem'].findall(page):
                # Absolutely ALL EnemyPlanet objects of the bot are created here
                if len(owner) != 0 :
                    planet = EnemyPlanet(Coords(galaxy, solarSystem, number), owner, ownerStatus, name, alliance)
                    foundPlanets.append(planet)
            if foundPlanets:
                found.append((searchGalaxy, searchSolarSystem,foundPlanets))
    return found
def getStats(self, type): # type can be: pts for points, flt for fleets or res for research
    """Yield (playerName, points) pairs from the statistics pages.

    Pages through the ranking 100 rows at a time up to rank 1400.

    @param type: ranking kind -- 'pts', 'flt' or 'res'.
    """
    page = self._fetchPhp('index.php', page='stat', start=1)
    form = ParseFile(page, self.lastFetchedUrl, backwards_compat=False)[-1]
    for rankStart in range(1, 1401, 100):
        form['type'] = [type]
        form['start'] = [str(rankStart)]
        html = self._fetchForm(form).read()
        for playerName, rawPoints in self.REGEXPS['stats'].findall(html):
            yield playerName, int(rawPoints.replace('.', ''))
def saveState(self):
    """Pickle the server address and session id to the webstate file.

    Uses pickle protocol 2 to match loadState().
    """
    stateFile = open(FILE_PATHS['webstate'], 'wb')
    try:
        pickler = cPickle.Pickler(stateFile, 2)
        pickler.dump(self.webpage)
        pickler.dump(self.session)
    finally:
        # BUG FIX: the original leaked the file handle when dump() raised.
        stateFile.close()
def loadState(self):
    """Restore the server address and session id from the webstate file.

    On a corrupt or unreadable file the state file is removed (best
    effort) and False is returned.

    @return: True when both values were restored, False otherwise.
    """
    try:
        # BUG FIX: 'with' closes the file even when load() raises
        # EOFError; the original leaked the handle on that path.
        with open(FILE_PATHS['webstate'], 'rb') as stateFile:
            u = cPickle.Unpickler(stateFile)
            self.webpage = u.load()
            self.session = u.load()
    except (EOFError, IOError):
        try:
            os.remove(FILE_PATHS['webstate'])
        except Exception : pass
        return False
    return True
def parseTime(strTime, format = "%a %b %d %H:%M:%S"):# example: Mon Aug 7 21:08:52
    """Parse a time string in OGame's most usual format into a datetime.

    The server omits the year, so the current year is prepended before
    parsing.

    @param strTime: time string, e.g. "Mon Aug 7 21:08:52".
    @param format: strptime format of *strTime* (without the year).
    @return: datetime object.
    """
    yearedFormat = "%Y " + format
    yearedString = "%s %s" % (datetime.now().year, strTime)
    fields = strptime(yearedString, yearedFormat)
    return datetime(*fields[0:6])
| tectronics/ogrobot | src/WebAdapter.py | Python | gpl-2.0 | 36,957 | [
"CRYSTAL",
"Galaxy",
"NAMD"
] | 66d2e311d42a1c2cb22e1a000073b14b910142a8e083cd4afec52e9b7ee6eaae |
#!/usr/bin/env python
import numpy
import netCDF4
import argparse
import math
def parseCommandLine():
    """
    Parse the command line and invoke operations.
    """
    parser = argparse.ArgumentParser(
        description='''
Concatenates monthly data files into a single netcdf file.
''',
        epilog='Written by A.Adcroft, 2014. No support offered.')
    parser.add_argument('inFiles', type=str, metavar='FILE', nargs='+',
                        help='''A netcdf file.''')
    parser.add_argument('-a', type=str, metavar='ANNUAL_FILE', default=None,
                        help='''An annual climatology netcdf file to use below where monthly data is provided.''')
    parser.add_argument('-o', type=str, metavar='OUTFILE', required=True,
                        help='''The concatenated output file.''')
    args = parser.parse_args()
    concatenateFiles(args.inFiles, args.o, annualFile=args.a)
def concatenateFiles(inFiles, outFile, annualFile=None):
    """
    Scans input files and writes output file.

    Copies dimensions, attributes and variables from the first monthly
    file, adds an unlimited 'time' (month number) dimension, then writes
    one time level per input file. When *annualFile* is given, its deeper
    vertical levels pad variables below the monthly data.
    """
    hi = netCDF4.Dataset(inFiles[0], 'r')
    if annualFile is not None: ha = netCDF4.Dataset(annualFile, 'r')
    # Create new file and record dimension
    ho = netCDF4.Dataset(outFile, 'w', format='NETCDF3_CLASSIC')
    ho.createDimension('time',None)
    time = ho.createVariable('time','f4',['time'])
    time.setncattr('long_name','month_number')
    time.setncattr('standard_name','month_number')
    time.setncattr('units','Month number')
    time.setncattr('axis','T')
    time.setncattr('description','Number of month in annual cycle, 1-12.')
    # Copy dimensions (taking the larger size from the annual file when
    # it exceeds the monthly one, e.g. more vertical levels)
    for d in hi.dimensions:
        if (annualFile is not None) and (d in ha.dimensions) and len(ha.dimensions[d])>len(hi.dimensions[d]):
            ho.createDimension(d, len(ha.dimensions[d]))
        else:
            ho.createDimension(d, len(hi.dimensions[d]))
    # Copy global attributes
    for a in hi.ncattrs():
        ho.__setattr__(a ,hi.__getattr__(a))
    # Create and copy variables
    for v in hi.variables:
        if len(hi.variables[v].shape)==1:
            # 1-D coordinate variables: copy values now, preferring the
            # (longer) annual axis when available
            if (annualFile is not None) and (v in ha.variables) and len(ha.variables[v])>len(hi.variables[v]): hptr = ha
            else: hptr = hi
            hv = ho.createVariable(v, hi.variables[v].dtype, hptr.variables[v].dimensions)
            hv[:] = hptr.variables[v][:]
        else:
            # multi-dimensional data: prepend the new 'time' dimension
            hv = ho.createVariable(v, hi.variables[v].dtype, [u'time']+list(hi.variables[v].dimensions))
        # Copy variable attributes
        for a in hi.variables[v].ncattrs():
            hv.setncattr(a, hi.variables[v].__getattr__(a))
    hi.close()
    # For each file, copy data
    nza = 0
    for n,f in zip(range(len(inFiles)),inFiles):
        hi = netCDF4.Dataset(f, 'r')
        for v in ho.variables:
            if 'time' in ho.variables[v].dimensions and len(ho.variables[v].dimensions)>1:
                # nzi/nza: vertical extents of monthly/annual data
                # (assumes the vertical axis is third from the right --
                # TODO confirm for all variables)
                nzi = hi.variables[v].shape[-3]
                if annualFile is not None: nza = ha.variables[v].shape[-3]
                print ho.variables[v].shape, hi.variables[v].shape, n, nzi, nza
                ho.variables[v][n,:nzi] = hi.variables[v][:]
                # pad deeper levels from the annual climatology
                if nza>nzi: ho.variables[v][n,nzi:] = ha.variables[v][nzi:]
        ho.variables['time'][n] = n+1
        hi.close()
    ho.close()
# Invoke parseCommandLine(), the top-level procedure
if __name__ == '__main__': parseCommandLine()
| adcroft/convert_WOA05 | python/concatenate_data.py | Python | mit | 3,180 | [
"NetCDF"
] | e6052b822f5eb35422cd661a12cb497a4cddb32e10b7132492bbda36643c631d |
"""
@name: Modules/House/Security/motion_sensor.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2019-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on aug 26, 2019
@Summary:
"""
__updated__ = '2019-12-29'
__version_info__ = (19, 8, 1)
__version__ = '.'.join(map(str, __version_info__))
# Import system type stuff
# Import PyMh files
from Modules.Core.Config.config_tools import Api as configApi
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.MotionSensor ')
CONFIG_NAME = 'motiondetectors'
class MotionDetectorInformation:
    """ This is the motion detector data
    ==> PyHouse.House.Security.MotionDetector.xxx as in the def below

    A plain value object filled in from the yaml config by LocalConfig.
    """

    def __init__(self):
        self.Name = None          # detector name from the config
        self.Comment = None       # free-form comment
        self.DeviceType = 'Security'
        self.DeviceSubType = 'MotionDetector'
        self.Family = None        # FamilyInformation()
        self.Room = None          # RoomInformation()
        self.Motion = None        # last reported motion state
        self.Timeout = 0          # seconds before motion is considered over
class LocalConfig:
    """ Loads the motiondetectors yaml config into
    MotionDetectorInformation objects.
    """

    m_config = None
    m_pyhouse_obj = None

    def __init__(self, p_pyhouse_obj):
        self.m_pyhouse_obj = p_pyhouse_obj
        self.m_config = configApi(p_pyhouse_obj)

    def _extract_one_motion_detector(self, p_config) -> MotionDetectorInformation:
        """ Extract the config info for one motion detector.
        (Docstring fixed: it was copy-pasted from a button extractor;
        the return annotation was also wrongly 'dict'.)

        @param p_config: config fragment containing one detector's information.
        @return: a MotionDetectorInformation() obj filled in.
        """
        l_obj = MotionDetectorInformation()
        l_required = ['Name', 'Family']
        for l_key, l_value in p_config.items():
            if l_key == 'Family':
                l_obj.Family = self.m_config.extract_family_group(l_value)
            elif l_key == 'Room':
                l_obj.Room = self.m_config.extract_room_group(l_value)
            else:
                setattr(l_obj, l_key, l_value)
        # Check for required data missing from the config file.
        for l_key in [l_attr for l_attr in dir(l_obj) if not l_attr.startswith('_') and not callable(getattr(l_obj, l_attr))]:
            if getattr(l_obj, l_key) is None and l_key in l_required:
                LOG.warning('Location Yaml is missing an entry for "{}"'.format(l_key))
        LOG.info('Extracted Motion Detector "{}"'.format(l_obj.Name))
        return l_obj

    def _extract_all_motion_detectors(self, p_config):
        """ Extract every motion detector defined in the config.

        @param p_config: sequence of per-detector config fragments.
        @return: dict mapping sequence index to MotionDetectorInformation.
        """
        l_dict = {}
        for l_ix, l_sensor in enumerate(p_config):
            l_sensor_obj = self._extract_one_motion_detector(l_sensor)
            l_dict[l_ix] = l_sensor_obj
        return l_dict

    def load_yaml_config(self):
        """ Read the motiondetectors.yaml file if it exists.
        It must contain 'Motion_Detectors:'; the detectors are a list.

        @return: dict of MotionDetectorInformation, or None when the file
            is missing or malformed.
        """
        LOG.info('Loading Config - Version:{}'.format(__version__))
        l_yaml = self.m_config.read_config_file(CONFIG_NAME)
        if l_yaml is None:
            LOG.error('{}.yaml is missing.'.format(CONFIG_NAME))
            return None
        try:
            l_yaml = l_yaml['Motion_Detectors']
        except (KeyError, TypeError):
            # Narrowed from a bare 'except:': only a missing key or a
            # non-mapping yaml root are expected here.
            LOG.warning('The config file does not start with "Motion_Detectors:"')
            return None
        l_motion = self._extract_all_motion_detectors(l_yaml)
        return l_motion
class Api:
    """ Motion detector module entry point for PyHouse.
    """

    m_pyhouse_obj = None
    m_local_config = None

    def __init__(self, p_pyhouse_obj):
        self.m_pyhouse_obj = p_pyhouse_obj
        self._add_storage()
        self.m_local_config = LocalConfig(p_pyhouse_obj)
        LOG.info("Initialized - Version:{}".format(__version__))

    def _add_storage(self) -> None:
        """ Create the (empty) MotionDetectors store on the house object.
        """
        self.m_pyhouse_obj.House.Security.MotionDetectors = {}

    def LoadConfig(self):
        """ Load the motion detector definitions from the yaml config.
        """
        LOG.info('Load Config')
        self.m_pyhouse_obj.House.Security.MotionDetectors = self.m_local_config.load_yaml_config()

    def SaveConfig(self):
        """ Saving is not implemented yet.
        """
        pass

    def Start(self):
        """ Nothing to start.
        """

    def Stop(self):
        """ Nothing to stop.
        """
| DBrianKimmel/PyHouse | Project/src/Modules/House/Security/Motiondetectors/motiondetectors.py | Python | mit | 4,434 | [
"Brian"
] | aed97e72c2ac86a38cb4c78ffcbe9ae27ead7dd1f552aafe1bcefcf9541ea032 |
"""Test buffer protocol for VTK arrays
Python 2.6 introduced a new buffer protocol that can expose raw
memory as a multi-dimensional array. This is used, for example,
by numpy in order to automatically generate arrays from memory
exposed by other python extension modules.
Created on Aug 1, 2015 by David Gobbi
"""
import sys
import struct
import vtk
from vtk.test import Testing
# array types and the corresponding format:
# maps VTK array class suffix -> (struct format char, item size in bytes)
lsize = vtk.VTK_SIZEOF_LONG
idsize = vtk.VTK_SIZEOF_ID_TYPE
# vtkIdType is 'q' (64-bit) or 'i' (32-bit) depending on the VTK build
if idsize == 8:
    idchar = 'q'
else:
    idchar = 'i'
arrayType = {
    'SignedChar':('b', 1), 'UnsignedChar':('B', 1),
    'Short':('h', 2), 'UnsignedShort':('H', 2),
    'Int':('i', 4), 'UnsignedInt':('I', 4),
    'Long':('l', lsize), 'UnsignedLong':('L', lsize),
    'IdType':(idchar, idsize),
    'LongLong':('q', 8), 'UnsignedLongLong':('Q', 8)
    }
class TestBuffer(Testing.vtkTest):
    """Exercise the buffer protocol exposed by VTK data arrays:
    format/itemsize/shape/strides of the memoryview and round-tripping
    the underlying memory."""

    def testOneDimensionalDataArray(self):
        """Test one-dimensional data array."""
        # memoryview() requires Python >= 2.7
        if sys.hexversion < 0x02070000:
            return
        for atype,ainfo in arrayType.items():
            # ainfo = (struct format char, item size in bytes)
            aclass = getattr(vtk, 'vtk' + atype + 'Array')
            a = aclass()
            a.InsertNextValue(10)
            a.InsertNextValue(7)
            a.InsertNextValue(85)
            m = memoryview(a)
            self.assertEqual(m.format, ainfo[0])
            self.assertEqual(m.itemsize, ainfo[1])
            self.assertEqual(m.strides, (ainfo[1],))
            self.assertEqual(m.shape, (3,))
            self.assertEqual(m.ndim, 1)
            # test the contents of the memoryview
            tp = struct.unpack(3*ainfo[0], m.tobytes())
            self.assertEqual(tp, (10, 7, 85))
            # now test re-creating the array from a buffer
            b = aclass()
            b.SetVoidArray(m, 3, True)
            self.assertEqual(b.GetValue(0), 10)
            self.assertEqual(b.GetValue(1), 7)
            self.assertEqual(b.GetValue(2), 85)

    def testTwoDimensionalDataArray(self):
        """Test data array with components."""
        if sys.hexversion < 0x02070000:
            return
        for atype,ainfo in arrayType.items():
            aclass = getattr(vtk, 'vtk' + atype + 'Array')
            a = aclass()
            a.SetNumberOfComponents(3)
            a.InsertNextTuple((10, 7, 4))
            a.InsertNextTuple((85, 8, 2))
            m = memoryview(a)
            self.assertEqual(m.format, ainfo[0])
            self.assertEqual(m.itemsize, ainfo[1])
            # tuples map to rows: shape is (numTuples, numComponents)
            self.assertEqual(m.shape, (2, 3))
            self.assertEqual(m.strides, (ainfo[1]*3, ainfo[1]))
            self.assertEqual(m.ndim, 2)
            # test the contents of the memoryview
            tp = struct.unpack(6*ainfo[0], m.tobytes())
            self.assertEqual(tp, (10, 7, 4, 85, 8, 2))

    def testCharArray(self):
        """Test the special case of the char array."""
        if sys.hexversion < 0x02070000:
            return
        # char arrays are exposed with format 'c', one byte per item
        a = vtk.vtkCharArray()
        a.SetNumberOfComponents(5)
        a.InsertNextTupleValue("hello")
        a.InsertNextTupleValue("world")
        m = memoryview(a)
        self.assertEqual(m.format, 'c')
        self.assertEqual(m.itemsize, 1)
        self.assertEqual(m.shape, (2, 5))
        self.assertEqual(m.strides, (5, 1))
        self.assertEqual(m.ndim, 2)
        # test the contents of the memoryview
        self.assertEqual(m.tobytes(), b"helloworld")

    def testBitArray(self):
        """Test the special case of the bit array."""
        if sys.hexversion < 0x02070000:
            return
        # bit array is actually stored as a byte array
        a = vtk.vtkBitArray()
        a.InsertNextValue(0)
        a.InsertNextValue(1)
        a.InsertNextValue(1)
        a.InsertNextValue(0)
        a.InsertNextValue(1)
        m = memoryview(a)
        self.assertEqual(m.format, 'B')
        self.assertEqual(m.itemsize, 1)
        self.assertEqual(m.shape, (1,))
        # test the contents of the memoryview:
        # the five bits 01101 occupy the high bits of the single byte
        self.assertEqual(ord(m.tobytes()) & 0xF8, 0x68)

    def testBufferShared(self):
        """Test the special buffer_shared() check that VTK provides."""
        a = bytearray(b'hello')
        self.assertEqual(vtk.buffer_shared(a, a), True)
        # equal contents but distinct memory are NOT shared
        b = bytearray(b'hello')
        self.assertEqual(vtk.buffer_shared(a, b), False)
        a = vtk.vtkFloatArray()
        a.SetNumberOfComponents(3)
        a.InsertNextTuple((10, 7, 4))
        a.InsertNextTuple((85, 8, 2))
        # a view over the same memory is shared ...
        b = vtk.vtkFloatArray()
        b.SetVoidArray(a, 6, True)
        self.assertEqual(vtk.buffer_shared(a, b), True)
        # ... a deep copy is not
        c = vtk.vtkFloatArray()
        c.DeepCopy(a)
        self.assertEqual(vtk.buffer_shared(a, c), False)
        if sys.hexversion >= 0x02070000:
            m = memoryview(a)
            self.assertEqual(vtk.buffer_shared(a, m), True)
        if sys.hexversion < 0x03000000:
            # legacy Python 2 buffer object
            m = buffer(a)
            self.assertEqual(vtk.buffer_shared(a, m), True)
# Run the buffer-protocol tests when executed directly.
if __name__ == "__main__":
    Testing.main([(TestBuffer, 'test')])
| hlzz/dotfiles | graphics/VTK-7.0.0/Common/Core/Testing/Python/TestBuffer.py | Python | bsd-3-clause | 5,200 | [
"VTK"
] | 2c258a8e7d67d186c3d55747e7a001df6d46c32da05718cf1816ae5d63dcea24 |
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test pickling of Iris objects.
"""
from __future__ import with_statement
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import cPickle
import StringIO
import iris
class TestPickle(tests.IrisTest):
    """Round-trip Iris objects through every cPickle protocol and check
    they come back equal / produce identical CML."""

    def pickle_then_unpickle(self, obj):
        """Returns a generator of ("cpickle protocol number", object) tuples."""
        for protocol in xrange(1 + cPickle.HIGHEST_PROTOCOL):
            str_buffer = StringIO.StringIO()
            cPickle.dump(obj, str_buffer, protocol)
            # move the str_buffer back to the start and reconstruct
            str_buffer.seek(0)
            reconstructed_obj = cPickle.load(str_buffer)
            yield protocol, reconstructed_obj

    @iris.tests.skip_data
    def test_cube_pickle(self):
        # A single cube must survive pickling with its data manager intact.
        cube = iris.load_cube(tests.get_data_path(('PP', 'globClim1', 'theta.pp')))
        self.assertCML(cube, ('cube_io', 'pickling', 'theta.cml'), checksum=False)
        for _, recon_cube in self.pickle_then_unpickle(cube):
            self.assertNotEqual(recon_cube._data_manager, None)
            self.assertEqual(cube._data_manager, recon_cube._data_manager)
            self.assertCML(recon_cube, ('cube_io', 'pickling', 'theta.cml'), checksum=False)

    @iris.tests.skip_data
    def test_cube_with_deferred_coord_points(self):
        # Data with 2d lats and lons that when loaded results in points that
        # are LazyArray objects.
        filename = tests.get_data_path(('NetCDF',
                                        'rotated',
                                        'xy',
                                        'rotPole_landAreaFraction.nc'))
        cube = iris.load_cube(filename)
        # Pickle and unpickle. Do not perform any CML tests
        # to avoid side effects.
        _, recon_cube = next(self.pickle_then_unpickle(cube))
        self.assertEqual(recon_cube, cube)

    @iris.tests.skip_data
    def test_cubelist_pickle(self):
        # A CubeList must round-trip as a whole, cube by cube.
        cubelist = iris.load(tests.get_data_path(('PP', 'COLPEX', 'theta_and_orog_subset.pp')))
        single_cube = cubelist[0]
        self.assertCML(cubelist, ('cube_io', 'pickling', 'cubelist.cml'))
        self.assertCML(single_cube, ('cube_io', 'pickling', 'single_cube.cml'))
        for _, reconstructed_cubelist in self.pickle_then_unpickle(cubelist):
            self.assertCML(reconstructed_cubelist, ('cube_io', 'pickling', 'cubelist.cml'))
            self.assertCML(reconstructed_cubelist[0], ('cube_io', 'pickling', 'single_cube.cml'))
            for cube_orig, cube_reconstruct in zip(cubelist, reconstructed_cubelist):
                self.assertArrayEqual(cube_orig.data, cube_reconstruct.data)
                self.assertEqual(cube_orig, cube_reconstruct)

    def test_picking_equality_misc(self):
        # Units (including the degenerate no-unit/unknown cases) must
        # compare equal after a pickle round-trip at every protocol.
        items_to_test = [
            iris.unit.Unit("hours since 2007-01-15 12:06:00", calendar=iris.unit.CALENDAR_STANDARD),
            iris.unit.as_unit('1'),
            iris.unit.as_unit('meters'),
            iris.unit.as_unit('no-unit'),
            iris.unit.as_unit('unknown')
        ]
        for orig_item in items_to_test:
            for protocol, reconstructed_item in self.pickle_then_unpickle(orig_item):
                fail_msg = ('Items are different after pickling at protocol %s.'
                            '\nOrig item: %r\nNew item: %r' % (protocol, orig_item, reconstructed_item)
                            )
                self.assertEqual(orig_item, reconstructed_item, fail_msg)
# Run the pickling tests when executed directly.
if __name__ == "__main__":
    tests.main()
| kwilliams-mo/iris | lib/iris/tests/test_pickling.py | Python | gpl-3.0 | 4,374 | [
"NetCDF"
] | 389d6d3588162359177ef332185ac614be1e81ab7528e3b48c9a4a50abb5730f |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module defines classes for point defects.
"""
__author__ = "Bharat Medasani, Nils E. R. Zimmermann"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Bharat Medasani, Nils E. R. Zimmermann"
__email__ = "mbkumar@gmail.com, n.zimmermann@tuhh.de"
__status__ = "Production"
__date__ = "Nov 28, 2016"
import os
import abc
import json
import numpy as np
from bisect import bisect_left
import time
from math import fabs
from pymatgen.core.periodic_table import Specie, Element
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.analysis.structure_analyzer import OrderParameters
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer, \
SpacegroupOperations
from pymatgen.io.zeopp import get_voronoi_nodes, get_void_volume_surfarea, \
get_high_accuracy_voronoi_nodes
from pymatgen.command_line.gulp_caller import get_energy_buckingham, \
get_energy_relax_structure_buckingham
from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder, \
RelaxationAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpeciesComparator
from pymatgen.analysis.bond_valence import BVAnalyzer
import six
from six.moves import filter
from six.moves import map
from six.moves import zip
# Load the tabulated ionic radii shipped next to this module:
# _ion_radii[element][str(oxidation_state)][str(coordination_number)] -> radius
file_dir = os.path.dirname(__file__)
rad_file = os.path.join(file_dir, 'ionic_radii.json')
with open(rad_file, 'r') as fp:
    _ion_radii = json.load(fp)
class ValenceIonicRadiusEvaluator(object):
    """
    Computes site valences and ionic radii for a structure using bond valence
    analyzer

    Args:
        structure: pymatgen.core.structure.Structure
    """

    def __init__(self, structure):
        # Work on a copy; _get_valences() re-decorates it with oxidation states.
        self._structure = structure.copy()
        self._valences = self._get_valences()
        self._ionic_radii = self._get_ionic_radii()

    @property
    def radii(self):
        """
        List of ionic radii of elements in the order of sites.
        """
        # NOTE(review): keyed by species string, so symmetrically distinct
        # sites of the same species collapse to one entry.
        el = [site.species_string for site in self._structure.sites]
        radii_dict = dict(zip(el, self._ionic_radii))
        return radii_dict

    @property
    def valences(self):
        """
        List of oxidation states of elements in the order of sites.
        """
        el = [site.species_string for site in self._structure.sites]
        valence_dict = dict(zip(el, self._valences))
        return valence_dict

    @property
    def structure(self):
        """
        Returns oxidation state decorated structure.
        """
        return self._structure.copy()

    def _get_ionic_radii(self):
        """
        Computes ionic radii of elements for all sites in the structure.
        If valence is zero, atomic radius is used.
        """
        radii = []
        coord_finder = VoronoiCoordFinder(self._structure)

        def nearest_key(sorted_vals, key):
            # Return the entry of sorted_vals closest to key (binary search).
            i = bisect_left(sorted_vals, key)
            if i == len(sorted_vals):
                return sorted_vals[-1]
            if i == 0:
                return sorted_vals[0]
            before = sorted_vals[i-1]
            after = sorted_vals[i]
            if after-key < key-before:
                return after
            else:
                return before

        for i in range(len(self._structure.sites)):
            site = self._structure.sites[i]
            # Undecorated Element (no oxidation state): fall back to the
            # atomic radius.
            if isinstance(site.specie,Element):
                radius = site.specie.atomic_radius
                # Handle elements with no atomic_radius
                # by using calculated values instead.
                if radius is None:
                    radius = site.specie.atomic_radius_calculated
                if radius is None:
                    raise ValueError(
                        "cannot assign radius to element {}".format(
                            site.specie))
                radii.append(radius)
                continue

            el = site.specie.symbol
            oxi_state = int(round(site.specie.oxi_state))
            coord_no = int(round(coord_finder.get_coordination_number(i)))
            try:
                # Snap the oxidation state to the nearest tabulated one.
                tab_oxi_states = sorted(map(int, _ion_radii[el].keys()))
                oxi_state = nearest_key(tab_oxi_states, oxi_state)
                radius = _ion_radii[el][str(oxi_state)][str(coord_no)]
            except KeyError:
                # No entry for this coordination number: first try the
                # adjacent coordination number on the rounded-away side ...
                if coord_finder.get_coordination_number(i)-coord_no > 0:
                    new_coord_no = coord_no + 1
                else:
                    new_coord_no = coord_no - 1
                try:
                    radius = _ion_radii[el][str(oxi_state)][str(new_coord_no)]
                    coord_no = new_coord_no
                except:
                    # ... otherwise average the radii of the nearest
                    # tabulated coordination numbers on either side.
                    tab_coords = sorted(map(int, _ion_radii[el][str(oxi_state)].keys()))
                    new_coord_no = nearest_key(tab_coords, coord_no)
                    # NOTE(review): reuses the outer loop variable 'i' here;
                    # harmless because 'for i in range(...)' rebinds it each
                    # iteration, but confusing.
                    i = 0
                    for val in tab_coords:
                        if val > coord_no:
                            break
                        i = i + 1
                    if i == len(tab_coords):
                        key = str(tab_coords[-1])
                        radius = _ion_radii[el][str(oxi_state)][key]
                    elif i == 0:
                        key = str(tab_coords[0])
                        radius = _ion_radii[el][str(oxi_state)][key]
                    else:
                        key = str(tab_coords[i-1])
                        radius1 = _ion_radii[el][str(oxi_state)][key]
                        key = str(tab_coords[i])
                        radius2 = _ion_radii[el][str(oxi_state)][key]
                        radius = (radius1+radius2)/2
                    #implement complex checks later
            radii.append(radius)
        return radii

    def _get_valences(self):
        """
        Computes ionic valences of elements for all sites in the structure.
        """
        try:
            bv = BVAnalyzer()
            self._structure = bv.get_oxi_state_decorated_structure(self._structure)
            valences = bv.get_valences(self._structure)
        except:
            try:
                # Retry with no symmetry tolerance.
                bv = BVAnalyzer(symm_tol=0.0)
                self._structure = bv.get_oxi_state_decorated_structure(self._structure)
                valences = bv.get_valences(self._structure)
            except:
                # Bond-valence analysis failed entirely: guess each site's
                # valence from the element's first common oxidation state
                # (0 for noble gases, which have none).
                valences = []
                for site in self._structure.sites:
                    if len(site.specie.common_oxidation_states) > 0:
                        valences.append(site.specie.common_oxidation_states[0])
                    # Handle noble gas species
                    # which have no entries in common_oxidation_states.
                    else:
                        valences.append(0)
                # NOTE(review): when the guesses are not charge-balanced
                # (non-zero sum) all valences are zeroed; only balanced
                # guesses are written back onto the structure. Confirm this
                # is the intended fallback policy.
                if sum(valences):
                    valences = [0]*self._structure.num_sites
                else:
                    self._structure.add_oxidation_state_by_site(valences)
        return valences
class Defect(six.with_metaclass(abc.ABCMeta, object)):
    """Abstract base class for point defects in a crystal structure."""

    @abc.abstractmethod
    def enumerate_defectsites(self):
        """Enumerate all the symmetrically distinct defect sites."""
        raise NotImplementedError()

    @property
    def structure(self):
        """The defect-free host structure (useful for Mott-Littleton
        calculations)."""
        return self._structure

    @property
    def struct_radii(self):
        """Ionic radii of the elements in the structure."""
        return self._rad_dict

    @property
    def struct_valences(self):
        """Valences of the elements in the structure."""
        return self._valence_dict

    def defectsite_count(self):
        """Number of symmetrically distinct defect sites."""
        return len(self._defect_sites)

    def get_defectsite(self, n):
        """Defect site at index ``n``."""
        return self._defect_sites[n]

    def get_defectsite_multiplicity(self, n):
        """Symmetric multiplicity of the defect site at index ``n``."""
        return self._defect_site_multiplicity[n]

    def get_defectsite_coordination_number(self, n):
        """Coordination number of the defect site at index ``n``."""
        return self._defectsite_coord_no[n]

    def get_coordinated_sites(self, n):
        """Structure sites surrounding the defect site at index ``n``."""
        return self._defect_coord_sites[n]

    def get_coordinated_elements(self, n):
        """Distinct element symbols of the sites surrounding the defect
        site at index ``n``."""
        symbols = []
        for neighbor in self._defect_coord_sites[n]:
            symbols.append(neighbor.specie.symbol)
        return list(set(symbols))

    @abc.abstractmethod
    def make_supercells_with_defects(self, scaling_matrix):
        """Generate supercells with the given multipliers and create the
        defect; the first supercell has no defects. Pass the unit matrix
        to create a unit cell with the defect."""
        raise NotImplementedError()
class Vacancy(Defect):
    """
    Subclass of Defect to generate vacancies and their analysis.

    Args:
        structure: pymatgen.core.structure.Structure
        valences: valences of elements as a dictionary
        radii: Radii of elements as a dictionary
    """

    def __init__(self, structure, valences, radii):
        self._structure = structure
        self._valence_dict = valences
        self._rad_dict = radii
        # Store symmetrically distinct sites, their coordination numbers
        # coordinated_sites, effective charge
        symm_finder = SpacegroupAnalyzer(self._structure)
        symm_structure = symm_finder.get_symmetrized_structure()
        equiv_site_seq = symm_structure.equivalent_sites
        # One representative site per equivalence class, plus its multiplicity.
        self._defect_sites = []
        self._defect_site_multiplicity = []
        for equiv_sites in equiv_site_seq:
            self._defect_sites.append(equiv_sites[0])
            self._defect_site_multiplicity.append(len(equiv_sites))
        # Map each representative back to its index in the full structure.
        self._vac_site_indices = []
        for site in self._defect_sites:
            for i in range(len(self._structure.sites)):
                if site == self._structure[i]:
                    self._vac_site_indices.append(i)
        coord_finder = VoronoiCoordFinder(self._structure)
        self._defectsite_coord_no = []
        self._defect_coord_sites = []
        for i in self._vac_site_indices:
            self._defectsite_coord_no.append(
                coord_finder.get_coordination_number(i)
            )
            self._defect_coord_sites.append(
                coord_finder.get_coordinated_sites(i)
            )
        # Lazily computed by later accessors:
        # effective charges, void volume and surface area.
        self._vac_eff_charges = None
        self._vol = None
        self._sa = None
#@property
#def valence_dict(self):
# return self._valence_dict
def enumerate_defectsites(self):
"""
Returns symmetrically distinct vacancy sites
"""
return self._defect_sites
def get_defectsite_structure_indices(self):
"""
Returns indices of symmetrically distinct vacancy sites
"""
return self._vac_site_indices
def get_defectsite_structure_index(self, n):
"""
index of the vacacy site in the structure.sites list
Args:
n:
Index of vacancy list
"""
return self._vac_site_indices[n]
def get_defectsite_effective_charge(self, n):
"""
Effective charge (In Kroger-Vink notation, cation vacancy has
effectively -ve charge and anion vacancy has +ve charge.)
Args:
n: Index of vacancy list
Returns:
Effective charnge of defect site
"""
# Effective charge (In Kroger-Vink notation, cation vacancy has
# effectively -ve charge and anion vacancy has +ve charge.) Inverse
# the BVAnalyzer.get_valences result.
el = self.get_defectsite(n).species_string
return -self._valence_dict[el]
#if not self._vac_eff_charges:
# self._vac_eff_charges = []
# for site in self.enumerate_defectsites():
# specie = site.specie.symbol
# self._vac_eff_charges.append(-self._valence_dict[specie])
#return self._vac_eff_charges[n]
def get_coordsites_min_max_charge(self, n):
"""
Minimum and maximum charge of sites surrounding the vacancy site.
Args:
n: Index of vacancy list
"""
bv = BVAnalyzer()
struct_valences = bv.get_valences(self._structure)
coordinated_site_valences = []
def _get_index(site):
for i in range(len(self._structure.sites)):
if site.is_periodic_image(self._structure.sites[i]):
return i
raise ValueError("Site not found")
for site in self._defect_coord_sites[n]:
ind = _get_index(site)
coordinated_site_valences.append(struct_valences[ind])
coordinated_site_valences.sort()
return coordinated_site_valences[0], coordinated_site_valences[-1]
# deprecated
def get_volume(self, n):
"""
Volume of the nth vacancy
Args:
n: Index of symmetrically distinct vacancies list
Returns:
floating number representing volume of vacancy
"""
if not self._vol:
self._vol = []
self._sa = []
um = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
sc = self.make_supercells_with_defects(um)[1:]
rad_dict = self.struct_radii
for i in range(len(sc)):
vol, sa = get_void_volume_surfarea(sc[i], rad_dict)
self._vol.append(vol)
self._sa.append(sa)
return self._vol[n]
# deprecated
def get_surface_area(self, n):
"""
Surface area of the nth vacancy
Args:
n: Index of symmetrically distinct vacancies list
Returns:
floating number representing volume of vacancy
"""
if not self._sa:
self._vol = []
self._sa = []
um = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
supercells = self.make_supercells_with_defects(um)[1:]
rad_dict = self.struct_radii
for sc in supercells:
vol, sa = get_void_volume_surfarea(sc, rad_dict)
self._vol.append(vol)
self._sa.append(sa)
return self._sa[n]
def _supercell_with_defect(self, scaling_matrix, defect_site):
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
oldf_coords = defect_site.frac_coords
coords = defect_site.lattice.get_cartesian_coords(oldf_coords)
newf_coords = sc.lattice.get_fractional_coords(coords)
sc_defect_site = PeriodicSite(defect_site.species_and_occu, newf_coords,
sc.lattice,
properties=defect_site.properties)
for i in range(len(sc.sites)):
#if sc_defect_site == sc.sites[i]:
if sc_defect_site.distance(sc.sites[i]) < 1e-3:
del sc[i]
return sc
raise ValueError('Something wrong if reached here')
def make_supercells_with_defects(self, scaling_matrix, species=None,
limit_return_structures=False):
"""
Generate sequence of supercells in pymatgen.core.structure.Structure
format, with each supercell containing one vacancy.
Args:
scaling_matrix: super cell scale parameters in matrix forms
species: Species in list format only for which vacancy supercells
are required. If not specified all the species are considered.
limit_return_structures: Boolean or positive number
If number, only that many structures are returned.
Returns:
Supercells with vacancies. First supercell has no defects.
"""
sc_with_vac = []
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
sc_with_vac.append(sc)
if not species:
species = sc.symbol_set
if not limit_return_structures:
limit_return_structures = self.defectsite_count()
for defect_site in self.enumerate_defectsites():
if len(sc_with_vac) <= limit_return_structures:
if isinstance(defect_site.specie,Specie):
site_specie = defect_site.specie.element.symbol
elif isinstance(defect_site.specie,Element):
site_specie = defect_site.specie.symbol
else:
raise TypeError("site specie is neither Specie nor Element")
if site_specie in species:
sc_with_vac.append(self._supercell_with_defect(
scaling_matrix, defect_site))
return sc_with_vac
class VacancyFormationEnergy(object):
    """
    Using GULP compute the vacancy formation energy.
    Works only for binary metal oxides due to the use of Buckingham Potentials
    """

    def __init__(self, vacancy):
        # Vacancy object whose defect supercells will be evaluated.
        self._vacancy = vacancy
        # Cached formation energies, one per symmetrically distinct vacancy.
        self._energies = []

    def get_energy(self, n, tol=0.5):
        """
        Formation Energy for nth symmetrically distinct vacancy.

        Args:
            n: Index of the symmetrically distinct vacancy.
            tol: Convergence tolerance on the formation energy with respect
                to increasing supercell size.
        """
        #generate defect free structure energy
        if not self._energies:
            no_vac = self._vacancy.defectsite_count()
            prev_energies = [0.0] * no_vac
            tol_flg = [False] * no_vac
            vac_gulp_kw = ('optimise', 'conp', 'qok')
            val_dict = self._vacancy.struct_valences
            # Grow the supercell (2x2x2 up to 5x5x5) until each vacancy's
            # formation energy changes by less than tol between sizes.
            for sp in range(2, 6):
                if not (False in tol_flg):
                    #print sp
                    break
                scale_mat = [[sp, 0, 0], [0, sp, 0], [0, 0, sp]]
                sc = self._vacancy.make_supercells_with_defects(scale_mat)
                blk_energy = get_energy_buckingham(sc[0])
                no = len(sc[0].sites)
                #print no
                for i in range(1, no_vac + 1):
                    if not tol_flg[i - 1]:
                        vac_energy = get_energy_buckingham(
                            sc[i], keywords=vac_gulp_kw,
                            valence_dict=val_dict
                        )
                        # Scale bulk energy by (N-1)/N to account for the
                        # atom removed from the N-site supercell.
                        form_energy = vac_energy - (no - 1) / no * blk_energy
                        if abs(form_energy - prev_energies[i - 1]) < tol:
                            tol_flg[i - 1] = True
                        prev_energies[i - 1] = form_energy

            self._energies = prev_energies
            self._tol_flg = tol_flg

        if not self._tol_flg[n]:
            print("Caution: tolerance not reached for {0} vacancy".format(n))
        return self._energies[n]
class Interstitial(Defect):
    """
    Subclass of Defect to generate interstitial sites
    """

    def __init__(self, structure, valences, radii, site_type='voronoi_vertex',
                 accuracy='Normal', symmetry_flag=True, oxi_state=False):
        """
        Given a structure, generate symmetrically distinct interstitial sites.
        For a non-ionic structure, use oxi_state=True and give atomic radii.

        Args:
            structure: pymatgen.core.structure.Structure
            valences: Dictionary of oxidation states of elements in
                {el:valence} form
            radii: Radii of elements in the structure
            site_type: "voronoi_vertex" uses voronoi nodes
                "voronoi_edgecenter" uses voronoi polyhedra edge centers
                "voronoi_facecenter" uses voronoi polyhedra face centers
                "all" combines vertices, edgecenters and facecenters.
                Default is "voronoi_vertex"
            accuracy: Flag denoting whether to use high accuracy version
                of Zeo++. Options are "Normal" and "High". Default is normal.
            symmetry_flag: If True, only returns symmetrically distinct sites
            oxi_state: If False, input structure is considered devoid of
                oxidation-state decoration. And oxi-state for each site is
                determined. Use True, if input structure is oxi-state
                decorated. This option is useful when the structure is
                not electro-neutral after deleting/adding sites. In that
                case oxi-decorate the structure before deleting/adding the
                sites.
        """
        if not oxi_state:
            self._structure = ValenceIonicRadiusEvaluator(structure).structure
        else:
            self._structure = structure

        self._valence_dict = valences
        self._rad_dict = radii

        # Use Zeo++ to obtain the voronoi nodes. Apply symmetry reduction
        # and the symmetry reduced voronoi nodes are possible candidates
        # for interstitial sites.
        if accuracy == "Normal":
            high_accuracy_flag = False
        elif accuracy == "High":
            high_accuracy_flag = True
        else:
            raise NotImplementedError("Accuracy setting not implemented.")

        if accuracy == "High":
            # High accuracy Zeo++ output provides only voronoi nodes.
            if site_type in ('voronoi_facecenter', 'voronoi_edgecenter', 'all'):
                raise NotImplementedError(
                    "Site type not implemented for the accuracy setting")

        vor_node_sites, vor_edgecenter_sites, vor_facecenter_sites = \
            symmetry_reduced_voronoi_nodes(self._structure, self._rad_dict,
                                           high_accuracy_flag, symmetry_flag)

        if site_type == 'voronoi_vertex':
            possible_interstitial_sites = vor_node_sites
        elif site_type == 'voronoi_facecenter':
            possible_interstitial_sites = vor_facecenter_sites
        elif site_type == 'voronoi_edgecenter':
            possible_interstitial_sites = vor_edgecenter_sites
        elif site_type == "all":
            possible_interstitial_sites = vor_node_sites + \
                vor_facecenter_sites + vor_edgecenter_sites
        else:
            raise ValueError("Input site type not implemented")

        #Do futher processing on possibleInterstitialSites to obtain
        #interstitial sites
        self._defect_sites = possible_interstitial_sites
        self._defectsite_coord_no = []
        self._defect_coord_sites = []
        self._defect_coord_charge = []
        self._radii = []

        for site in self._defect_sites:
            coord_no, coord_sites, chrg = self._get_coord_no_sites_chrg(site)
            self._defectsite_coord_no.append(coord_no)
            self._defect_coord_sites.append(coord_sites)
            self._defect_coord_charge.append(chrg)

        for site in self._defect_sites:
            vor_radius = site.properties.get('voronoi_radius', None)
            if vor_radius:
                vor_radius = float(vor_radius)
            # NOTE(review): may append None when voronoi_radius is absent
            # from the site properties — TODO confirm downstream handling.
            self._radii.append(vor_radius)

    def _get_coord_no_sites_chrg(self, site):
        """
        Compute the coordination number and coordination charge

        Args:
            site:
                pymatgen.core.sites.Site
        """
        struct = self._structure.copy()
        struct.append(site.specie.symbol, site.frac_coords)
        coord_finder = VoronoiCoordFinder(struct)
        coord_no = coord_finder.get_coordination_number(-1)
        coord_sites = coord_finder.get_coordinated_sites(-1)

        # In some cases coordination sites to interstitials include
        # interstitials also. Filtering them.
        def no_inter(site):
            return not site.specie.symbol == 'X'

        # BUG FIX: materialize the filter into a list. A lazy filter
        # object can be iterated only once, but the stored coordination
        # sites are iterated repeatedly by the accessor methods.
        coord_sites = list(filter(no_inter, coord_sites))

        coord_chrg = 0
        if self._valence_dict:
            for site, weight in coord_finder.get_voronoi_polyhedra(-1).items():
                if not site.specie.symbol == 'X':
                    coord_chrg += weight * self._valence_dict[site.species_string]

        return coord_no, coord_sites, coord_chrg

    def enumerate_defectsites(self):
        """
        Enumerate all the symmetrically distinct interstitial sites.
        The defect site has "X" as occupied specie.
        """
        return self._defect_sites

    def append_defectsite(self, site):
        """
        Append a site to list of possible interstitials

        Args:
            site: pymatgen.core.sites.Site
        """
        raise NotImplementedError()

    def delete_defectsite(self, n):
        """
        Remove a symmetrically distinct interstitial site

        Args:
            n: Index of interstitial site
        """
        del self._defect_sites[n]

    def get_coordsites_charge_sum(self, n):
        """
        Total charge of the interstitial coordinated sites.

        Args:
            n: Index of interstitial list
        """
        return self._defect_coord_charge[n]

    def get_coordsites_min_max_charge(self, n):
        """
        Minimum and maximum charge of sites surrounding the interstitial site.

        Args:
            n: Index of symmetrical distinct interstitial site
        """
        coord_site_valences = []
        for site in self._defect_coord_sites[n]:
            coord_site_valences.append(self._valence_dict[site.specie.symbol])
        coord_site_valences.sort()
        return coord_site_valences[0], coord_site_valences[-1]

    def get_radius(self, n):
        """
        Voronoi radius of the nth interstitial.

        Args:
            n: Index of symmetrically distinct interstitials list

        Returns:
            floating number representing radius of interstitial sphere
        """
        return self._radii[n]

    def get_radii(self):
        """Voronoi radii of all interstitial sites."""
        return self._radii

    def reduce_defectsites(self):
        """
        If multiple defect sites have same voronoi radius, only one is kept.
        Useful if the symmetry based reduction of initial sites returned
        from Zeo++ is not working properly due to deviation in ideal lattice
        coordinates.
        """
        distinct_radii = list(set(self._radii))
        for rad in distinct_radii:
            ind = self._radii.index(rad)  # Index of first site with 'rad'
            for i in reversed(list(range(ind + 1, len(self._radii)))):
                # Backward search for remaining sites so index is not changed
                if self._radii[i] == rad:
                    self._defect_sites.pop(i)
                    self._defectsite_coord_no.pop(i)
                    self._defect_coord_sites.pop(i)
                    self._radii.pop(i)

    def radius_prune_defectsites(self, radius):
        """
        Remove all the defect sites with voronoi radius less than input radius
        """
        for i in reversed(list(range(len(self._radii)))):
            if self._radii[i] < radius:
                self._defect_sites.pop(i)
                self._defectsite_coord_no.pop(i)
                self._defect_coord_sites.pop(i)
                self._radii.pop(i)

    def prune_defectsites(self, el="C", oxi_state=4, dlta=0.1):
        """
        Prune all the defect sites which can't accommodate the input element
        with the input oxidation state.
        """
        rad = float(Specie(el, oxi_state).ionic_radius) - dlta
        self.radius_prune_defectsites(rad)

    def prune_close_defectsites(self, dist=0.2):
        """
        Prune the sites that are very close.

        NOTE(review): only self._defect_sites is pruned here; the parallel
        coordination/radius lists are left untouched and fall out of sync —
        TODO confirm whether that is intentional.
        """
        ind = 0
        while ind < self.defectsite_count():
            # Scan backwards so popping does not shift unvisited indices.
            i = self.defectsite_count() - 1
            while i > ind:
                d = self._defect_sites[ind].distance(self._defect_sites[i])
                if d < dist:
                    self._defect_sites.pop(i)
                i -= 1
            ind += 1

    def _supercell_with_defect(self, scaling_matrix, defect_site, element):
        """
        Build a supercell and append `element` at the defect site.

        Returns:
            The supercell with the interstitial appended, or None when the
            interstitial would sit too close to an existing site.
        """
        sc = self._structure.copy()
        sc.make_supercell(scaling_matrix)
        oldf_coords = defect_site.frac_coords
        coords = defect_site.lattice.get_cartesian_coords(oldf_coords)
        newf_coords = sc.lattice.get_fractional_coords(coords)
        # Wrap fractional coordinates back into the [0, 1] cell.
        for i in range(3):
            coord = newf_coords[i]
            if coord < 0:
                while (coord < 0):
                    coord = coord + 1
                newf_coords[i] = coord
            elif coord > 1:
                while (coord > 1):
                    coord = coord - 1
                newf_coords[i] = coord

        # BUG FIX: the original `try/except/finally: return sc` swallowed
        # every exception (not just the proximity ValueError) and returned
        # a partially built cell. Only the proximity failure is expected.
        try:
            sc.append(element, newf_coords, coords_are_cartesian=False,
                      validate_proximity=True)
        except ValueError:
            # Proximity violation: interstitial overlaps an existing site.
            return None
        return sc

    def make_supercells_with_defects(self, scaling_matrix, element):
        """
        Returns sequence of supercells in pymatgen.core.structure.Structure
        format, with each supercell containing an interstitial.
        First supercell has no defects.
        """
        sc_list_with_interstitial = []
        sc = self._structure.copy()
        sc.make_supercell(scaling_matrix)
        sc_list_with_interstitial.append(sc)
        for defect_site in self.enumerate_defectsites():
            sc_with_inter = self._supercell_with_defect(
                scaling_matrix, defect_site, element
            )
            if sc_with_inter:
                sc_list_with_interstitial.append(sc_with_inter)
        return sc_list_with_interstitial
class InterstitialAnalyzer(object):
    """
    Use GULP to compute the interstitial formation energy, relaxed structures.
    Works only for metal oxides due to the use of Buckingham Potentials.

    Args:
        inter: pymatgen.defects.point_defects.Interstitial
        el: Element name in short hand notation ("El")
        oxi_state: Oxidation state
        scd: Super cell dimension as number. The scaling is equal along xyz.
    """

    def __init__(self, inter, el, oxi_state, scd=2):
        self._inter = inter
        self._el = el
        self._oxi_state = oxi_state
        self._scd = scd
        # Caches filled lazily by get_energy / _relax_analysis.
        self._relax_energies = []
        self._norelax_energies = []
        self._relax_struct = []

    def get_energy(self, n, relax=True):
        """
        Formation Energy for nth symmetrically distinct interstitial.

        Args:
            n: Index of symmetrically distinct interstitial.
            relax: If True the relaxed formation energy is returned,
                otherwise the unrelaxed one.
        """
        if relax and not self._relax_energies:
            self._relax_analysis()
        if not relax and not self._norelax_energies:
            no_inter = self._inter.defectsite_count()
            inter_gulp_kw = ('qok',)
            val_dict = self._inter.struct_valences
            # NOTE(review): this mutates the valence dict shared with the
            # Interstitial object — confirm that side effect is intended.
            val_dict[self._el] = self._oxi_state  # If element not in structure
            scd = self._scd
            scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
            sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
            blk_energy = get_energy_buckingham(sc[0])
            for i in range(1, no_inter + 1):
                inter_energy = get_energy_buckingham(
                    sc[i], keywords=inter_gulp_kw, valence_dict=val_dict
                )
                form_energy = inter_energy - blk_energy
                self._norelax_energies.append(form_energy)

        if relax:
            return self._relax_energies[n]
        else:
            return self._norelax_energies[n]

    def _relax_analysis(self):
        """
        Optimize interstitial structures

        Fills self._relax_struct (bulk first, then one structure per
        interstitial) and self._relax_energies (formation energies).
        """
        no_inter = self._inter.defectsite_count()
        inter_gulp_kw = ('optimise', 'conp', 'qok')
        val_dict = self._inter.struct_valences
        scd = self._scd
        scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
        sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
        blk_energy, rlx_struct = get_energy_relax_structure_buckingham(sc[0])
        self._relax_struct.append(rlx_struct)
        # NOTE(review): mutates the shared valence dict (see get_energy).
        val_dict[self._el] = self._oxi_state  # If element not in structure
        for i in range(1, no_inter + 1):
            energy, rlx_struct = get_energy_relax_structure_buckingham(
                sc[i], keywords=inter_gulp_kw, valence_dict=val_dict
            )
            form_energy = energy - blk_energy
            self._relax_energies.append(form_energy)
            self._relax_struct.append(rlx_struct)

    def get_relaxed_structure(self, n):
        """
        Optimized interstitial structure

        Args:
            n: Symmetrically distinct interstitial index

        .. note::

            To get relaxed bulk structure pass -1.
            -ve index will not work as expected.
        """
        if not self._relax_struct:
            self._relax_analysis()
        return self._relax_struct[n + 1]

    def get_percentage_volume_change(self, n):
        """
        Volume change after the introduction of interstitial

        Args:
            n: Symmetrically distinct interstitial index
        """
        if not self._relax_struct:
            self._relax_analysis()
        blk_struct = self._relax_struct[0]
        # NOTE(review): slicing a list returns the same Structure object,
        # not a copy; the delete below mutates the cached structure —
        # confirm intent.
        def_struct = self._relax_struct[n + 1:n + 2][0]
        del def_struct.sites[-1]
        rv = RelaxationAnalyzer(blk_struct, def_struct)
        return rv.get_percentage_volume_change()

    def get_percentage_lattice_parameter_change(self, n):
        """
        Lattice parameter change after the introduction of interstitial

        Args:
            n: Symmetrically distinct interstitial index
        """
        if not self._relax_struct:
            self._relax_analysis()
        blk_struct = self._relax_struct[0]
        # NOTE(review): not a copy — see get_percentage_volume_change.
        def_struct = self._relax_struct[n + 1:n + 2][0]
        del def_struct.sites[-1]
        rv = RelaxationAnalyzer(blk_struct, def_struct)
        return rv.get_percentage_lattice_parameter_changes()

    def get_percentage_bond_distance_change(self, n):
        """
        Bond distance change after the introduction of interstitial

        Args:
            n: Symmetrically distinct interstitial index
        """
        if not self._relax_struct:
            self._relax_analysis()
        blk_struct = self._relax_struct[0]
        # NOTE(review): not a copy — see get_percentage_volume_change.
        def_struct = self._relax_struct[n + 1:n + 2][0]
        del def_struct.sites[-1]
        #print def_struct
        rv = RelaxationAnalyzer(blk_struct, def_struct)
        return rv.get_percentage_bond_dist_changes()

    def relaxed_structure_match(self, i, j):
        """
        Check if the relaxed structures of two interstitials match

        Args:
            i: Symmetrically distinct interstitial index
            j: Symmetrically distinct interstitial index

        .. note::

            To use relaxed bulk structure pass -1.
            -ve index will not work as expected
        """
        if not self._relax_struct:
            self._relax_analysis()
        sm = StructureMatcher()
        struct1 = self._relax_struct[i + 1]
        struct2 = self._relax_struct[j + 1]
        return sm.fit(struct1, struct2)
class StructureRelaxer(object):
    """
    Relax a structure with GULP Buckingham potentials on construction and
    hold on to the optimized result.
    """

    def __init__(self, structure):
        # Keep the input structure and relax it immediately.
        self._unrelax_struct = structure
        self.relax()

    def relax(self):
        """Run the GULP relaxation and cache the optimized structure."""
        _, optimized = get_energy_relax_structure_buckingham(
            self._unrelax_struct)
        self._relax_struct = optimized

    def get_relaxed_structure(self):
        """Return the relaxed structure computed at construction time."""
        return self._relax_struct
class InterstitialStructureRelaxer(object):
    """
    Performs structural relaxation for each interstitial supercell.

    Args:
        interstitial: Unrelaxed interstitial
        el: Species string in short notation
        oxi_state: Oxidation state of the element
        supercell_dim: Dimensions of super cell
    """

    def __init__(self, interstitial, el, oxi_state, supercell_dim=2):
        self._inter = interstitial
        self._scd = supercell_dim
        self._el = el
        self._oxi_state = oxi_state
        # Index 0 holds the bulk result; one entry per interstitial after.
        self._relax_structs = []
        self._relax_energies = []

    def relax(self):
        """
        Optimize the bulk supercell and each interstitial supercell with
        GULP. Failed relaxations are recorded as None so indices stay
        aligned with the defect-site indices.

        Raises:
            IOError: If no relaxation produced an energy.
        """
        no_inter = self._inter.defectsite_count()
        inter_gulp_kw = ('optimise', 'conp', 'qok')
        val_dict = self._inter.struct_valences
        scd = self._scd
        scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
        sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
        blk_energy, rlx_struct = get_energy_relax_structure_buckingham(sc[0])
        self._relax_structs.append(rlx_struct)
        self._relax_energies.append(blk_energy)
        # NOTE(review): mutates the valence dict shared with the
        # Interstitial object — confirm that side effect is intended.
        val_dict[self._el] = self._oxi_state  # If element not in structure

        for i in range(1, no_inter + 1):
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Catch only Exception.
            try:
                energy, rlx_struct = get_energy_relax_structure_buckingham(
                    sc[i], keywords=inter_gulp_kw, valence_dict=val_dict
                )
                self._relax_energies.append(energy)
                self._relax_structs.append(rlx_struct)
            except Exception:
                # Best-effort: record the failure, keep indices aligned.
                self._relax_energies.append(None)
                self._relax_structs.append(None)

        # Hand-rolled is_empty() replaced with the equivalent any().
        if not any(self._relax_energies):
            raise IOError('Relaxation failed')

    def relaxed_structure_match(self, i, j):
        """
        Check if the relaxed structures of two interstitials match

        Args:
            i: Symmetrically distinct interstitial index
            j: Symmetrically distinct interstitial index

        .. note::

            Index 0 corresponds to bulk.
        """
        if not self._relax_structs:
            self.relax()
        sm = StructureMatcher()
        struct1 = self._relax_structs[i]
        struct2 = self._relax_structs[j]
        return sm.fit(struct1, struct2)

    def relaxed_energy_match(self, i, j):
        """
        Check if the relaxed energies of two interstitials match

        Args:
            i: Symmetrically distinct interstitial index
            j: Symmetrically distinct interstitial index

        .. note::

            Index 0 corresponds to bulk.
        """
        if not self._relax_energies:
            self.relax()
        energy1 = self._relax_energies[i]
        energy2 = self._relax_energies[j]
        return energy1 == energy2

    def get_relaxed_structure(self, n):
        """
        Get the relaxed structure of nth symmetrically distinct interstitial.

        Args:
            n: Symmetrically distinct interstitial index

        .. note::

            0 corresponds to relaxed bulk structure
        """
        if not self._relax_structs:
            self.relax()
        return self._relax_structs[n]

    def get_relaxed_energy(self, n):
        """
        Get the relaxed energy of nth symmetrically distinct interstitial.

        Args:
            n: Symmetrically distinct interstitial index

        .. note::

            0 corresponds to relaxed bulk structure
        """
        if not self._relax_energies:
            self.relax()
        return self._relax_energies[n]

    def get_relaxed_interstitial(self):
        """
        Collect the distinct relaxed interstitials (by energy) into a
        RelaxedInterstitial object.

        Returns:
            RelaxedInterstitial built from the bulk structure/energy plus
            one structure/energy per distinct interstitial energy.
        """
        if not self._relax_energies:
            self.relax()
        energies = self._relax_energies[:]
        structs = self._relax_structs[:]
        distinct_energy_set = set(energies[1:])  # only interstitial energies
        if None in distinct_energy_set:
            distinct_energy_set.remove(None)
        distinct_structs = [structs[0]]  # bulk
        distinct_energies = [energies[0]]
        for energy in distinct_energy_set:
            ind = energies.index(energy)
            distinct_structs.append(structs[ind])
            distinct_energies.append(energies[ind])
        return RelaxedInterstitial(
            distinct_structs, distinct_energies, self._inter.struct_valences
        )
class RelaxedInterstitial(object):
    """
    Stores the relaxed supercell structures for each interstitial
    Used to compute formation energies, displacement of atoms near the
    the interstitial.

    Args:
        struct_list: List of structures(supercells). The first structure should
            represent relaxed bulk structure and the subsequent ones
            interstitial structures (with the extra interstitial site
            appended at the end).
        energy_list: List of energies for relaxed interstitial structures.
            The first energy should correspond to bulk structure
        valence_dict: Valences of elements in dictionary form
    """

    def __init__(self, struct_list, energy_list, valence_dict):
        # NOTE(review): pop(0) mutates the caller's lists in place —
        # confirm callers do not reuse them afterwards.
        self._blk_struct = struct_list[0]
        struct_list.pop(0)
        self._structs = struct_list
        self._blk_energy = energy_list[0]
        energy_list.pop(0)
        self._energies = energy_list
        self._valence_dict = valence_dict

        # Lazily filled by _coord_find().
        self._coord_no = []
        self._coord_sites = []
        self._coord_charge_no = []

    def formation_energy(self, n, chem_pot=0):
        """
        Compute the interstitial formation energy

        Args:
            n: Index of interstitials
            chem_pot: Chemical potential of interstitial site element.
                If not given, assumed as zero. The user is strongly
                urged to supply the chemical potential value
        """
        return self._energies[n] - self._blk_energy - chem_pot

    def get_percentage_volume_change(self, n):
        """
        Volume change after the introduction of interstitial

        Args:
            n: index of interstitials
        """
        # NOTE(review): slicing a list returns the same Structure object,
        # not a copy; the delete below mutates the stored structure —
        # confirm intent.
        def_struct = self._structs[n:n + 1][0]
        del def_struct.sites[-1]
        rv = RelaxationAnalyzer(self._blk_struct, def_struct)
        return rv.get_percentage_volume_change()

    def get_percentage_lattice_parameter_change(self, n):
        """
        Lattice parameter change after the introduction of interstitial

        Args:
            n: index of interstitials
        """
        # NOTE(review): despite the original "# copy" comment, slicing does
        # NOT copy the Structure — the delete mutates the stored structure.
        def_struct = self._structs[n:n + 1][0]
        del def_struct.sites[-1]
        rv = RelaxationAnalyzer(self._blk_struct, def_struct)
        return rv.get_percentage_lattice_parameter_changes()

    def get_percentage_bond_distance_change(self, n):
        """
        Bond distance change after the introduction of interstitial.

        Args:
            n: index of interstitials
        """
        # NOTE(review): despite the original "# copy" comment, slicing does
        # NOT copy the Structure — the delete mutates the stored structure.
        def_struct = self._structs[n:n + 1][0]
        del def_struct.sites[-1]
        rv = RelaxationAnalyzer(self._blk_struct, def_struct)
        return rv.get_percentage_bond_dist_changes()

    def get_bulk_structure(self):
        """
        Return relaxed bulk structure
        """
        return self._blk_struct

    def get_interstitial_structure(self, n):
        """
        Return the relaxed structure of the nth interstitial.
        """
        return self._structs[n]

    def defect_count(self):
        """
        Returns the number of distinct interstitials
        """
        return len(self._structs)

    def get_defectsite(self, n):
        """
        Returns the defect site of nth interstitial.

        Args:
            n: Index of interstitial
        """
        # The interstitial site is appended at the end of each supercell.
        return self._structs[n][-1]

    def get_coordination_number(self, n):
        """
        Coordination number for nth interstitial.

        Args:
            n: Index of interstitials
        """
        if not self._coord_no:
            self._coord_find()
        return self._coord_no[n]

    def get_charge_coordination_number(self, n):
        """
        Charge coordination number for nth interstitial.

        Args:
            n: Index of interstitials
        """
        if not self._coord_charge_no:
            self._coord_find()
        return self._coord_charge_no[n]

    def get_coordinated_sites(self, n):
        """
        Coordinated sites for nth interstitial.

        Args:
            n: Index of interstitials
        """
        if not self._coord_sites:
            self._coord_find()
        return self._coord_sites[n]

    def get_coordinated_bulk_sites(self, n):
        """
        Bulk sites corresponding to the coordinated sites for nth interstitial.

        Args:
            n: Index of interstitials
        """
        blk_sites = []
        for site in self.get_coordinated_sites(n):
            site_index = self._structs[n].sites.index(site)
            blk_sites.append(self._blk_struct[site_index])
        return blk_sites

    def get_coordinated_site_displacement(self, n):
        """
        Compute the total displacement of coordinated sites from the
        interstitial sites during the relaxation

        Args:
            n: Index of defect site
        """
        coord_sites = self.get_coordinated_sites(n)
        coord_blk_sites = self.get_coordinated_bulk_sites(n)
        dist_sum = 0
        for i in range(len(coord_sites)):
            dist_sum += coord_sites[i].distance_from_point(coord_blk_sites[i])
        # How to compute the average?
        return dist_sum

    def _coord_find(self):
        """
        calls VoronoiCoordFinder to compute the coordination number,
        coordination charge
        """
        for i in range(self.defect_count()):
            struct = self._structs[i].copy()
            coord_finder = VoronoiCoordFinder(struct)
            # Index -1 is the appended interstitial site.
            self._coord_no.append(coord_finder.get_coordination_number(-1))
            self._coord_sites.append(coord_finder.get_coordinated_sites(-1))
            coord_chrg = 0
            for site, weight in coord_finder.get_voronoi_polyhedra(-1).items():
                coord_chrg += weight * self._valence_dict[site.species_string]
            self._coord_charge_no.append(coord_chrg)
def symmetry_reduced_voronoi_nodes(
        structure, rad_dict, high_accuracy_flag=False, symm_flag=True):
    """
    Obtain symmetry reduced voronoi nodes using Zeo++ and
    pymatgen.symmetry.finder.SpacegroupAnalyzer

    Args:
        structure: pymatgen Structure object
        rad_dict: Dictionary containing radii of species in the structure
        high_accuracy_flag: Flag denoting whether to use high accuracy
            version of Zeo++
        symm_flag: Flag denoting whether to return symmetrically distinct
            sites only

    Returns:
        Tuple (node sites, edge-center sites, face-center sites) of
        symmetrically distinct voronoi sites. The last two entries are
        None when high_accuracy_flag is set, because high accuracy Zeo++
        only provides nodes.
    """
    def add_closest_equiv_site(dist_sites, equiv_sites):
        # From an orbit of equivalent sites, pick the one closest on
        # average to the representatives already selected.
        if not dist_sites:
            dist_sites.append(equiv_sites[0])
        else:
            avg_dists = []
            for site in equiv_sites:
                dists = [site.distance(dst_site, jimage=[0, 0, 0])
                         for dst_site in dist_sites]
                avg_dists.append(sum(dists) / len(dist_sites))
            ind = avg_dists.index(min(avg_dists))
            dist_sites.append(equiv_sites[ind])

    def cmp_memoize_last_site(f):  # Compares and stores last site
        # Assumes the input is sorted by voronoi radius so duplicates are
        # adjacent; comparing against the previous accepted site suffices.
        def not_duplicates(site1, site2):
            return site1.distance(site2) >= 1e-5

        cmp_memoize_last_site.cache = None

        def helper(x):
            if not cmp_memoize_last_site.cache:
                cmp_memoize_last_site.cache = f(x)
                return True
            y = f(x)
            if not_duplicates(cmp_memoize_last_site.cache, y):
                cmp_memoize_last_site.cache = y
                return True
            return False
        return helper

    @cmp_memoize_last_site
    def check_not_duplicates(site):
        return site

    if not symm_flag:
        if not high_accuracy_flag:
            vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct = \
                get_voronoi_nodes(structure, rad_dict)
            return vor_node_struct.sites, vor_edgecenter_struct.sites, \
                vor_facecenter_struct.sites
        else:
            # Only the nodes are from high accuracy voronoi decomposition
            vor_node_struct = \
                get_high_accuracy_voronoi_nodes(structure, rad_dict)
            # Before getting the symmetry, remove the duplicates
            vor_node_struct.sites.sort(key=lambda site: site.voronoi_radius)
            # BUG FIX: materialize the filter into a list. The previous
            # code returned a one-shot lazy filter object here while the
            # symm_flag=True branch below already returned a list.
            dist_sites = list(filter(check_not_duplicates,
                                     vor_node_struct.sites))
            return dist_sites, None, None

    if not high_accuracy_flag:
        def get_dist_sites(vor_struct):
            # Symmetry-reduce one voronoi structure; if the symmetry
            # finder fails, merge near-duplicate sites and retry once.
            SpgA = SpacegroupAnalyzer
            try:
                symmetry_finder = SpgA(vor_struct, symprec=1e-1)
                symm_struct = symmetry_finder.get_symmetrized_structure()
            except Exception:
                # BUG FIX: was a bare `except:` which also caught
                # KeyboardInterrupt/SystemExit.
                vor_struct.merge_sites(0.1, 'delete')
                symmetry_finder = SpgA(vor_struct, symprec=1e-1)
                symm_struct = symmetry_finder.get_symmetrized_structure()

            equiv_sites_list = symm_struct.equivalent_sites
            if not equiv_sites_list:
                return vor_struct.sites
            dist_sites = []
            for equiv_sites in equiv_sites_list:
                add_closest_equiv_site(dist_sites, equiv_sites)
            return dist_sites

        vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct = \
            get_voronoi_nodes(structure, rad_dict)
        node_dist_sites = get_dist_sites(vor_node_struct)
        edgecenter_dist_sites = get_dist_sites(vor_edgecenter_struct)
        facecenter_dist_sites = get_dist_sites(vor_facecenter_struct)
        return node_dist_sites, edgecenter_dist_sites, facecenter_dist_sites
    else:
        # Only the nodes are from high accuracy voronoi decomposition
        vor_node_struct = \
            get_high_accuracy_voronoi_nodes(structure, rad_dict)
        # Before getting the symmetry, remove the duplicates
        vor_node_struct.sites.sort(key=lambda site: site.voronoi_radius)
        dist_sites = list(filter(check_not_duplicates,
                                 vor_node_struct.sites))
        # Symmetry reduction is intentionally skipped for high-accuracy
        # nodes (see removed legacy code in version history).
        node_dist_sites = dist_sites
        return (node_dist_sites, None, None)
def get_neighbors_of_site_with_index(struct, n, p=None):
    """
    Determine the neighbors around the site that has index n in the input
    Structure object struct, given the approach defined by parameters
    p. All supported neighbor-finding approaches and listed and
    explained in the following. All approaches start by creating a
    tentative list of neighbors using a large cutoff radius defined in
    parameter dictionary p via key "cutoff".
    "min_dist": find nearest neighbor and its distance d_nn; consider all
        neighbors which are within a distance of d_nn * (1 + delta),
        where delta is an additional parameter provided in the
        dictionary p via key "delta".
    "scaled_VIRE": compute the radii, r_i, of all sites on the basis of
        the valence-ionic radius evaluator (VIRE); consider all
        neighbors for which the distance to the central site is less
        than the sum of the radii multiplied by an a priori chosen
        parameter, delta,
        (i.e., dist < delta * (r_central + r_neighbor)).
    "min_relative_VIRE": same approach as "min_dist", except that we
        use relative distances (i.e., distances divided by the sum of the
        atom radii from VIRE).
    "min_relative_OKeeffe": same approach as "min_relative_VIRE", except
        that we use the bond valence parameters from O'Keeffe's bond valence
        method (J. Am. Chem. Soc. 1991, 3226-3229) to calculate
        relative distances.
    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which
                neighbors are to be determined.
        p (dict): specification (via "approach" key; default is "min_dist")
                and parameters of neighbor-finding approach.
                Default cutoff radius is 6 Angstrom (key: "cutoff").
                Other default parameters are as follows.
                min_dist: "delta": 0.15;
                min_relative_OKeeffe: "delta": 0.05;
                min_relative_VIRE: "delta": 0.05;
                scaled_VIRE: "delta": 2.
    Returns: ([site]) list of sites that are considered to be nearest
            neighbors to site with index n in Structure object struct.

    Raises:
        RuntimeError: for an unsupported "approach", or when the VIRE
            structure does not match the input structure.
    """
    sites = []
    if p is None:
        p = {"approach": "min_dist", "delta": 0.15,
                "cutoff": 6}
    if p["approach"] not in [
            "min_relative_OKeeffe", "min_dist", "min_relative_VIRE", \
            "scaled_VIRE"]:
        raise RuntimeError("Unsupported neighbor-finding approach"
                " (\"{}\")".format(p["approach"]))
    if p["approach"] == "min_relative_OKeeffe" or p["approach"] == "min_dist":
        neighs_dists = struct.get_neighbors(struct[n], p["cutoff"])
        try:
            eln = struct[n].specie.element
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only the missing ``.element``
        # attribute (plain Element rather than Species) is expected here.
        except AttributeError:
            eln = struct[n].species_string
    elif p["approach"] == "scaled_VIRE" or p["approach"] == "min_relative_VIRE":
        vire = ValenceIonicRadiusEvaluator(struct)
        if np.linalg.norm(struct[n].coords-vire.structure[n].coords) > 1e-6:
            raise RuntimeError("Mismatch between input structure and VIRE structure.")
        neighs_dists = vire.structure.get_neighbors(vire.structure[n], p["cutoff"])
        rn = vire.radii[vire.structure[n].species_string]
    reldists_neighs = []
    for neigh, dist in neighs_dists:
        if p["approach"] == "scaled_VIRE":
            dscale = p["delta"] * (vire.radii[neigh.species_string] + rn)
            if dist < dscale:
                sites.append(neigh)
        elif p["approach"] == "min_relative_VIRE":
            reldists_neighs.append([dist / (
                    vire.radii[neigh.species_string] + rn), neigh])
        elif p["approach"] == "min_relative_OKeeffe":
            try:
                el2 = neigh.specie.element
            # BUG FIX: narrowed from bare ``except:`` (see above).
            except AttributeError:
                el2 = neigh.species_string
            reldists_neighs.append([dist / get_okeeffe_distance_prediction(
                    eln, el2), neigh])
        elif p["approach"] == "min_dist":
            reldists_neighs.append([dist, neigh])
    # For the "min_*" family, keep every neighbor whose (relative)
    # distance is within (1 + delta) of the smallest one found.
    if p["approach"] == "min_relative_VIRE" or \
            p["approach"] == "min_relative_OKeeffe" or \
            p["approach"] == "min_dist":
        min_reldist = min([reldist for reldist, neigh in reldists_neighs])
        for reldist, neigh in reldists_neighs:
            if reldist / min_reldist < 1.0 + p["delta"]:
                sites.append(neigh)
    return sites
class StructureMotifInterstitial(Defect):
    """
    Subclass of Defect to generate interstitial sites at positions
    where the interstitialcy is coordinated by nearest neighbors
    in a way that resembles basic structure motifs
    (e.g., tetrahedra, octahedra). The algorithm will be formally
    introduced in an upcoming publication
    by Nils E. R. Zimmermann, Anubhav Jain, and Maciej Haranczyk,
    and it is already used by the Python Charged Defect Toolkit
    (PyCDT, https://arxiv.org/abs/1611.07481).
    """
    # Motif labels accepted by __init__; note that only "tet" and "oct"
    # are actually scored in the grid scan below.
    __supported_types = ("tet", "oct", "bcc")
    def __init__(self, struct, inter_elem,
                 motif_types=("tet", "oct"),
                 op_threshs=(0.3, 0.5),
                 dl=0.2, doverlap=1.0, facmaxdl=1.01, verbose=False):
        """
        Generates symmetrically distinct interstitial sites at positions
        where the interstitial is coordinated by nearest neighbors
        in a pattern that resembles a supported structure motif
        (e.g., tetrahedra, octahedra).
        Args:
            struct (Structure): input structure for which symmetrically
                distinct interstitial sites are to be found.
            inter_elem (string): element symbol of desired interstitial.
            motif_types ([string]): list of structure motif types that are
                to be considered.  Permissible types are:
                tet (tetrahedron), oct (octahedron).
            op_threshs ([float]): threshold values for the underlying order
                parameters to still recognize a given structural motif
                (i.e., for an OP value >= threshold the coordination pattern
                match is positive, for OP < threshold the match is
                negative.
            dl (float): grid fineness in Angstrom.  The input
                structure is divided into a grid of dimension
                a/dl x b/dl x c/dl along the three crystallographic
                directions, with a, b, and c being the lengths of
                the three lattice vectors of the input unit cell.
            doverlap (float): distance that is considered
                to flag an overlap between any trial interstitial site
                and a host atom.
            facmaxdl (float): factor to be multiplied with the maximum grid
                width that is then used as a cutoff distance for the
                clustering prune step.
            verbose (bool): flag indicating whether (True) or not (False;
                default) to print additional information to screen.
        """
        # Initialize interstitial finding.
        self._structure = struct.copy()
        self._motif_types = motif_types[:]
        if len(self._motif_types) == 0:
            raise RuntimeError("no motif types provided.")
        self._op_threshs = op_threshs[:]
        for imotif, motif in enumerate(self._motif_types):
            if motif not in self.__supported_types:
                raise RuntimeError("unsupported motif type: {}.".format(
                        motif))
        self._dl = dl
        self._defect_sites = []
        self._defect_types = []
        self._defect_site_multiplicity = []
        self._defect_cns = []
        # NOTE(review): _get_symmetry is a private SpacegroupAnalyzer API;
        # the rotations and translations it yields are used below for the
        # symmetry-pruning step.
        rots, trans = SpacegroupAnalyzer(
                struct)._get_symmetry()
        # Integer number of grid bins per lattice vector and the resulting
        # effective grid spacings; maxdl later serves as clustering cutoff.
        nbins = [int(struct.lattice.a / dl), \
                int(struct.lattice.b / dl), \
                int(struct.lattice.c / dl)]
        dls = [struct.lattice.a / float(nbins[0]), \
                struct.lattice.b / float(nbins[1]), \
                struct.lattice.c / float(nbins[2])]
        maxdl = max(dls)
        if verbose:
            print("Grid size: {} {} {}".format(nbins[0], nbins[1], nbins[2]))
            print("dls: {} {} {}".format(dls[0], dls[1], dls[2]))
        # Working copy with one extra (trial interstitial) atom appended;
        # that last site is repositioned for every grid point.
        struct_w_inter = struct.copy()
        struct_w_inter.append(inter_elem, [0, 0, 0])
        natoms = len(list(struct_w_inter.sites))
        ops = OrderParameters(motif_types, cutoff=-10.0)
        trialsites = []
        # Loop over trial positions that are based on a regular
        # grid in fractional coordinate space
        # within the unit cell.
        for ia in range(nbins[0]):
            a = (float(ia)+0.5) / float(nbins[0])
            for ib in range(nbins[1]):
                b = (float(ib)+0.5) / float(nbins[1])
                for ic in range(nbins[2]):
                    c = (float(ic)+0.5) / float(nbins[2])
                    struct_w_inter.replace(
                            natoms-1, inter_elem, coords=[a, b, c],
                            coords_are_cartesian=False)
                    # Only keep trial positions that do not overlap any host
                    # atom (the sphere contains only the trial site itself).
                    if len(struct_w_inter.get_sites_in_sphere(struct_w_inter.sites[natoms-1].coords, doverlap)) == 1:
                        # Sweep the neighbor-shell tolerance "delta" until a
                        # 4- or 6-fold coordination pattern emerges (or >6,
                        # which aborts the sweep for this grid point).
                        delta = 0.1
                        ddelta = 0.1
                        delta_end = 0.8
                        while delta < delta_end:
                            neighs = get_neighbors_of_site_with_index(
                                    struct_w_inter, natoms-1, p={
                                    "approach": "min_dist", "delta": delta,
                                    "cutoff": 6})
                            nneighs = len(neighs)
                            if nneighs > 6:
                                break
                            if nneighs not in [4, 6]:
                                delta += ddelta
                                continue
                            allsites = [s for s in neighs]
                            indeces_neighs = [i for i in range(len(allsites))]
                            allsites.append(struct_w_inter.sites[natoms-1])
                            opvals = ops.get_order_parameters(
                                    allsites, len(allsites)-1,
                                    indeces_neighs=indeces_neighs)
                            motif_type = "unrecognized"
                            # Accept the motif whose OP value exceeds its
                            # threshold for the matching coordination number.
                            if "tet" in motif_types:
                                if nneighs == 4 and \
                                        opvals[motif_types.index("tet")] > \
                                        op_threshs[motif_types.index("tet")]:
                                    motif_type = "tet"
                                    this_op = opvals[motif_types.index("tet")]
                            if "oct" in motif_types:
                                if nneighs == 6 and \
                                        opvals[motif_types.index("oct")] > \
                                        op_threshs[motif_types.index("oct")]:
                                    motif_type = "oct"
                                    this_op = opvals[motif_types.index("oct")]
                            if motif_type != "unrecognized":
                                # Element-resolved coordination numbers of
                                # the recognized environment.
                                cns = {}
                                for isite, site in enumerate(neighs):
                                    if isinstance(site.specie, Element):
                                        elem = site.specie.symbol
                                    else:
                                        elem = site.specie.element.symbol
                                    if elem in list(cns.keys()):
                                        cns[elem] = cns[elem] + 1
                                    else:
                                        cns[elem] = 1
                                trialsites.append({
                                        "mtype": motif_type,
                                        "opval": this_op,
                                        "delta": delta,
                                        "coords": struct_w_inter.sites[natoms-1].coords[:],
                                        "fracs": np.array([a, b, c]),
                                        "cns": dict(cns)})
                                break
                            delta += ddelta
        # Prune list of trial sites by clustering and find the site
        # with the largest order parameter value in each cluster.
        nintersites = len(trialsites)
        unique_motifs = []
        for ts in trialsites:
            if ts["mtype"] not in unique_motifs:
                unique_motifs.append(ts["mtype"])
        labels = {}
        # Pairwise adjacency: trial sites closer than maxdl*facmaxdl (with
        # periodic images) belong to the same cluster.
        connected = []
        for i in range(nintersites):
            connected.append([])
            for j in range(nintersites):
                dist, image = struct_w_inter.lattice.get_distance_and_image(
                        trialsites[i]["fracs"],
                        trialsites[j]["fracs"])
                connected[i].append(True if dist < (maxdl*facmaxdl) else False)
        include = []
        for motif in unique_motifs:
            # Label propagation: repeatedly pull connected pairs onto the
            # smaller label until no label changes (per-motif clustering).
            labels[motif] = []
            for i, ts in enumerate(trialsites):
                labels[motif].append(i if ts["mtype"] == motif else -1)
            change = True
            while change:
                change = False
                for i in range(nintersites-1):
                    if change:
                        break
                    if labels[motif][i] == -1:
                        continue
                    for j in range(i+1, nintersites):
                        if labels[motif][j] == -1:
                            continue
                        if connected[i][j] and labels[motif][i] != labels[motif][j]:
                            if labels[motif][i] < labels[motif][j]:
                                labels[motif][j] = labels[motif][i]
                            else:
                                labels[motif][i] = labels[motif][j]
                            change = True
                            break
            unique_ids = []
            for l in labels[motif]:
                if l != -1 and l not in unique_ids:
                    unique_ids.append(l)
            if verbose:
                print("unique_ids {} {}".format(motif, unique_ids))
            # Representative of each cluster = member with largest OP value.
            for uid in unique_ids:
                maxq = 0.0
                imaxq = -1
                for i in range(nintersites):
                    if labels[motif][i] == uid:
                        if imaxq < 0 or trialsites[i]["opval"] > maxq:
                            imaxq = i
                            maxq = trialsites[i]["opval"]
                include.append(imaxq)
        # Prune by symmetry.
        multiplicity = {}
        discard = []
        for motif in unique_motifs:
            discard_motif = []
            for indi, i in enumerate(include):
                if trialsites[i]["mtype"] != motif or \
                        i in discard_motif:
                    continue
                multiplicity[i] = 1
                # All symmetry images of candidate i's fractional coords.
                symposlist = [trialsites[i]["fracs"].dot(
                        np.array(m, dtype=float)) for m in rots]
                for t in trans:
                    symposlist.append(trialsites[i]["fracs"]+np.array(t))
                for indj in range(indi+1, len(include)):
                    j = include[indj]
                    if trialsites[j]["mtype"] != motif or \
                            j in discard_motif:
                        continue
                    # Candidate j lying on any image of i is symmetrically
                    # equivalent: drop j, bump i's multiplicity.
                    for sympos in symposlist:
                        dist, image = struct.lattice.get_distance_and_image(
                                sympos, trialsites[j]["fracs"])
                        if dist < maxdl * facmaxdl:
                            discard_motif.append(j)
                            multiplicity[i] += 1
                            break
            for i in discard_motif:
                if i not in discard:
                    discard.append(i)
        if verbose:
            print("Initial trial sites: {}\nAfter clustering: {}\n"
                  "After symmetry pruning: {}".format(
                  len(trialsites), len(include),
                  len(include)-len(discard)))
        # Final assembly of the surviving, symmetry-distinct defect sites.
        c = 0
        for i in include:
            if i not in discard:
                self._defect_sites.append(
                        PeriodicSite(
                                Element(inter_elem),
                                trialsites[i]["fracs"],
                                self._structure.lattice,
                                to_unit_cell=False,
                                coords_are_cartesian=False,
                                properties=None))
                self._defect_types.append(trialsites[i]["mtype"])
                self._defect_cns.append(trialsites[i]["cns"])
                self._defect_site_multiplicity.append(multiplicity[i])
    def enumerate_defectsites(self):
        """
        Get all defect sites.
        Returns:
            defect_sites ([PeriodicSite]): list of periodic sites
                    representing the interstitials.
        """
        return self._defect_sites
    def get_motif_type(self, i):
        """
        Get the motif type of defect with index i (e.g., "tet").
        Returns:
            motif (string): motif type.
        """
        return self._defect_types[i]
    def get_coordinating_elements_cns(self, i):
        """
        Get element-specific coordination numbers of defect with index i.
        Returns:
            elem_cn (dict): dictionary storing the coordination numbers (int)
                    with string representation of elements as keys.
                    (i.e., {elem1 (string): cn1 (int), ...}).
        """
        return self._defect_cns[i]
    def make_supercells_with_defects(self, scaling_matrix):
        """
        Generate a sequence of supercells
        in which each supercell contains a single interstitial,
        except for the first supercell in the sequence
        which is a copy of the defect-free input structure.
        Args:
            scaling_matrix (3x3 integer array): scaling matrix
                to transform the lattice vectors.
        Returns:
            scs ([Structure]): sequence of supercells.
        """
        scs = []
        sc = self._structure.copy()
        sc.make_supercell(scaling_matrix)
        scs.append(sc)
        for ids, defect_site in enumerate(self._defect_sites):
            sc_with_inter = sc.copy()
            sc_with_inter.append(defect_site.species_string,
                defect_site.frac_coords,
                coords_are_cartesian=False,
                validate_proximity=False,
                properties=None)
            if not sc_with_inter:
                raise RuntimeError("could not generate supercell with"
                        " interstitial {}".format(ids+1))
            scs.append(sc_with_inter.copy())
        return scs
| xhqu1981/pymatgen | pymatgen/analysis/defects/point_defects.py | Python | mit | 72,613 | [
"GULP",
"pymatgen"
] | d9ed6d4183082fa715d4a8e956f02cabd9244816900c3ba098ec3b9dd8dde496 |
import numpy as np
from scipy.optimize import curve_fit, minimize
from scipy.special import erf
from copy import copy
def CDF_distance(cdf1, cdf2):
    """Kolmogorov-Smirnov-style distance between two sampled CDFs:
    the maximum absolute pointwise difference."""
    diff = cdf2 - cdf1
    return np.abs(diff).max()
def compute_cdf(x, y):
    """
    Given x and y data points, compute the CDF.

    Treats ``y`` as a sampled PDF over ``x`` and returns its cumulative
    trapezoidal integral, i.e. CDF[i] = integral of y from x[0] to x[i]
    (so CDF[0] == 0).  No normalization is applied; if ``y`` is a proper
    PDF the final value approaches 1.

    BUG FIX: the original body returned the undefined name ``CDF`` and
    always raised NameError.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    CDF = np.zeros(np.size(x))
    # trapezoid rule, accumulated bin by bin
    CDF[1:] = np.cumsum(0.5 * (y[1:] + y[:-1]) * np.diff(x))
    return CDF
class general_functions():
    """Base class for fittable 1-D distribution models.

    Subclasses provide ``_f`` (the PDF), optionally ``_logf`` (log10 of
    the PDF, used when ``fit_in_logspace`` is True) and ``_CDF`` (the
    analytic CDF, used by the 'KS' fitting method).  Fit results are
    stored on the instance as ``popt``/``pcov``.
    """
    def __init__(self, name, function = None):
        self.name = name
        # Allow injecting a custom PDF callable instead of subclassing.
        if function is not None:
            self._f = function
        self.p0 = None            # initial parameter guess (if supplied)
        self.popt = None          # best-fit parameters
        self.pcov = None          # covariance of the fit (curve_fit only)
        self.fit_in_logspace = False  # subclasses may flip this on
        return
    def fit_function(self, xdata, ydata, method = 'curve_fit', data_cdf = None, *args, **kwargs):
        """
        Fit function to data. By default, this uses the scipy method 'curve_fit', but the
        'KS' can be provided as method to minimize the distance between the CDFs of the two
        functions. If 'KS' is used, the data CDF is integrated using numpy trapz, but it would
        possibly be better to provide the CDF computed separately, using `data_cdf' argument.
        """
        if 'p0' in kwargs.keys():
            self.p0 = kwargs['p0']
        if method == 'curve_fit': # use scipy curve fitting
            if self.fit_in_logspace:
                # fit log10(f) against log10(y) for dynamic-range-heavy data
                self.popt, self.pcov = curve_fit(self._logf, xdata, np.log10(ydata),
                                                 *args, **kwargs)
                #print self.name, self.popt, self._logf(xdata,*self.popt), np.log10(ydata)
            else:
                self.popt, self.pcov = curve_fit(self._f, xdata, ydata,
                                                 *args, **kwargs)
        elif method == 'KS':
            # optimize the fit by minimizing distance between CDF
            if not 'p0' in kwargs.keys():
                print("Must supply initial guess if using KS method")
                raise ValueError
            else:
                # p0 is passed to minimize() as x0, so it must not remain
                # in the kwargs forwarded to minimize.
                del kwargs['p0']
            # compute the CDF from the data to fit
            if data_cdf is None:
                data_cdf = compute_cdf(xdata, ydata)
            func = lambda x0 : CDF_distance(self._CDF(xdata, *x0), data_cdf)
            result = minimize(func, self.p0, *args, **kwargs)
            self.popt = result.x
            self.pcov = None  # no covariance available from minimize()
        return self.popt, self.pcov
    # def assign_function(self, function):
    #     self._f = function
    #     return
class power_law(general_functions):
    """Pure power-law PDF, f(x) = a * x**(-k); fitted in log space."""

    def __init__(self):
        general_functions.__init__(self, 'powerlaw')
        self.fit_in_logspace = True

    def _logf(self, x, k, a):
        # log10 of the PDF: log10(a) - k*log10(x)
        return -1.0 * k * np.log10(x) + np.log10(a)

    def _f(self, x, k, a):
        return 10.0**(self._logf(x, k, a))

    def _CDF(self, x, k, a, norm=False):
        """Analytic antiderivative of a*x**(-k); optionally normalized
        to a maximum of 1."""
        exponent = (-k) + 1.0
        CDF = a / exponent * x**(-k + 1)
        if norm:
            CDF = CDF / np.max(CDF)
        return CDF
class truncated_powerlaw(general_functions):
    """Power law with a lower cutoff x_t: f(x) = a*x**(-k) for x > x_t,
    zero at and below the cutoff.  Fitted in log space."""

    def __init__(self):
        general_functions.__init__(self, 'truncated_powerlaw')
        self.fit_in_logspace = True
        return

    def _f(self, x, k, a, x_t):
        """PDF: zero at/below x_t, power law above."""
        fx = np.zeros(np.size(x))
        above = x > x_t
        fx[above] = 10.0**(self._logf(x[above], k, a, x_t))
        return fx

    def _logf(self, x, k, a, x_t):
        """log10 of the PDF; truncated points get a large negative value."""
        fx = np.zeros(np.size(x))
        above = x > x_t
        # BUG FIX: points exactly at x_t previously fell through both
        # branches (x < x_t and x > x_t) and silently kept log10(f) = 0,
        # i.e. f = 1; treat them as part of the truncated region instead.
        fx[~above] = -99999
        fx[above] = -1.0 * k * np.log10(x[above]) + np.log10(a)
        return fx

    def _CDF(self, x, k, a, x_t, norm = False):
        """Analytic CDF: zero below the cutoff, power-law integral
        a/(1-k) * x**(1-k) above it (same closed form as power_law)."""
        CDF = np.zeros(np.size(x))
        above = x > x_t
        # BUG FIX: previously called power_law._CDF unbound with the wrong
        # argument count, which raised TypeError; inline the closed form.
        CDF[above] = a / (1.0 - k) * x[above]**(1.0 - k)
        if norm and np.size(CDF) > 0 and np.max(np.abs(CDF)) > 0:
            CDF = CDF / np.max(CDF)
        return CDF
class gaussian(general_functions):
    """Gaussian (normal) PDF fitter.

    Fitted parameters are mu (mean) and sigma (standard deviation).
    If ``fix_mean`` is given, mu is pinned to that value when evaluating
    both the PDF and the CDF.
    """

    def __init__(self, fix_mean = None):
        general_functions.__init__(self, 'gaussian')
        self._mu = fix_mean  # optional fixed mean; overrides fitted mu
        return

    def _CDF(self, x, mu, sigma):
        """Analytic normal CDF: 0.5*(1 + erf((x - mu)/(sigma*sqrt(2))))."""
        if not (self._mu is None):
            mu = self._mu
        # BUG FIX: previously ignored mu and sigma entirely (used
        # erf(x/sqrt(2))), i.e. always evaluated a standard normal CDF.
        CDF = 0.5 * (1.0 + erf((x - mu) / (sigma * np.sqrt(2.0))))
        return CDF

    def _f(self, x, mu, sigma):
        """Normal PDF evaluated at x."""
        if not (self._mu is None):
            # BUG FIX: was ``self.mu`` (nonexistent attribute -> AttributeError).
            mu = self._mu
        # BUG FIX: the exponent previously centered on sigma instead of mu.
        fx = (1.0 / (np.sqrt(2.0 * np.pi) * sigma) *
              np.exp(-(x - mu) * (x - mu) / (2.0 * sigma * sigma)))
        return fx

    def print_parameters(self):
        print("mu (mean of data) and sigma (standard deviation of data)")
class gaussian_powerlaw(general_functions):
    """Piecewise Gaussian-core + exponential-tail PDF in the shifted
    variable s = x - full_mean; the two pieces are matched at s = st.

    ``self.full_mean`` must be set by the caller before fitting; the
    transition point ``st`` is swept internally by fit_function.
    """
    def __init__(self):
        """
        Fit a gaussian + power law PDF
        """
        general_functions.__init__(self, 'gaussian_powerlaw')
        self.fit_in_logspace = True
        return
    def _f(self, x, mu, alpha, sigma):
        # Normalization is currently hard-wired to 1 (see the commented-out
        # analytic expression below).
        N = 1.0
        fx = np.zeros(np.size(x))
        xlog = np.log(x)
        # Transition in absolute x, derived from the shifted cutoff st.
        self.xt = self.st + self.full_mean
        s = x - self.full_mean
        mu = mu - self.full_mean
        # Amplitude p_o matches the Gaussian piece to the tail at s = st.
        p_o = 1.0/(np.sqrt(2.0*np.pi)*sigma) * np.exp(-1.0*(self.st - mu)**2 / (2.0*sigma*sigma) + alpha*self.st)
#        N = 2.0 * (1.0 + erf( (2.0*self.st + sigma*sigma) / (2.0**(3.0/2.0)*sigma)) - (2.0*p_o*self.xt**(-alpha))/(-alpha))**(-1)
        fx[ s < self.st] = N / (np.sqrt(2.0*np.pi)*sigma) * np.exp(-1.0 * (s[s<self.st] - mu)**2 / (2.0*sigma*sigma))
        fx[ s > self.st] = N * p_o * np.exp(-alpha * s[s>self.st])
        self.N = N
        self.sigma = sigma
        self.p_o = p_o
        return fx
    def _logf(self, x, *args):
        fvals = np.log10(self._f(x, *args))
        return fvals
    def fit_function(self, xdata, ydata, method = 'curve_fit', data_cdf = None, *args, **kwargs):
        """
        Fit function to data. By default, this uses the scipy method 'curve_fit', but the
        'KS' can be provided as method to minimize the distance between the CDFs of the two
        functions. If 'KS' is used, the data CDF is integrated using numpy trapz, but it would
        possibly be better to provide the CDF computed separately, using `data_cdf' argument.
        """
        if 'p0' in kwargs.keys():
            self.p0 = kwargs['p0']
        min_error = np.inf
        # Sweep candidate transition points between the PDF peak and the
        # largest x value, keeping the fit with the smallest error.
        all_xt = np.logspace( np.log10(xdata[np.argmax(ydata)]), np.log10(np.max(xdata)), np.size(xdata)*2)
        none_successful = True
        for xt in all_xt:
            self.st = xt - self.full_mean
            try:
                if self.fit_in_logspace:
                    self.popt, self.pcov = curve_fit(self._logf, xdata, np.log10(ydata),
                                                     *args, **kwargs)
                    #print self.name, self.popt, self._logf(xdata,*self.popt), np.log10(ydata)
                else:
                    self.popt, self.pcov = curve_fit(self._f, xdata, ydata,
                                                     *args, **kwargs)
                none_successful = False
            except:
                # this candidate transition failed to converge; try the next
                continue
            # chi-square-like figure of merit for this candidate
            y_fit = self._f(xdata, *self.popt)
            error = np.sum( (y_fit - ydata)**2 / ydata )
            if error < min_error:
                optimal_st = 1.0*self.st
                optimal_popt = copy(self.popt)
                optimal_pcov = copy(self.pcov)
                min_error = error
        if none_successful:
            self.popt = None ; self.pcov = None; self.st = None
            print("No fit found for the " + self.name)
            raise RuntimeError
        else:
            self.popt = optimal_popt
            self.pcov = optimal_pcov
            self.st = optimal_st
        return self.popt, self.pcov
class lognormal_powerlaw(general_functions):
    """
    Following Chen, Burkhart, Goodman, and Collins 2018
    """
    def __init__(self):
        """
        Fit a lognormal + power law tail PDF
        """
        general_functions.__init__(self,'lognormal_powerlaw')
        self.fit_in_logspace = True
        return

    def _f(self, x, mu, alpha, sigma):
        """Piecewise PDF: lognormal core below the transition st,
        exponential tail (in s = log(x/full_mean)) above it, matched at
        st via the amplitude p_o.

        Requires ``self.full_mean`` and ``self.st`` to be set beforehand
        (fit_function sweeps st internally).
        """
        N = 1.0  # normalization currently hard-wired to 1
        fx = np.zeros(np.size(x))
        xlog = np.log(x)
        # Transition in absolute x, derived from the logarithmic cutoff st.
        self.xt = self.full_mean * np.exp(self.st)
        s = np.log(x / self.full_mean)
        mu = mu - np.log( self.full_mean)
        # Amplitude matching the lognormal piece to the tail at s = st.
        p_o = 1.0/(np.sqrt(2.0*np.pi)*sigma*self.xt) * np.exp(-1.0*(self.st-mu)**2 / (2.0*sigma*sigma) + alpha*self.st)
        fx[ s < self.st] = N / (np.sqrt(2.0*np.pi)*sigma*x[s<self.st]) * np.exp(-1.0 * (s[s<self.st] - mu)**2 / (2.0*sigma*sigma))
        fx[ s > self.st] = N * p_o * np.exp(-alpha * s[s>self.st])
        self.N = N
        self.sigma = sigma
        self.p_o = p_o
        return fx

    def _logf(self, x, *args):
        return np.log10(self._f(x, *args))

    def fit_function(self, xdata, ydata, method = 'curve_fit', data_cdf = None, *args, **kwargs):
        """
        Fit function to data. By default, this uses the scipy method 'curve_fit', but the
        'KS' can be provided as method to minimize the distance between the CDFs of the two
        functions. If 'KS' is used, the data CDF is integrated using numpy trapz, but it would
        possibly be better to provide the CDF computed separately, using `data_cdf' argument.

        Sweeps candidate transition points x_t between the PDF peak and the
        largest x value, keeping the fit with the smallest error.

        Raises:
            RuntimeError: if no candidate transition point yields a
                successful curve_fit.
        """
        if 'p0' in kwargs.keys():
            self.p0 = kwargs['p0']
        min_error = np.inf
        all_xt = np.logspace( np.log10(xdata[np.argmax(ydata)]), np.log10(np.max(xdata)), np.size(xdata)*2)
        none_successful = True
        for xt in all_xt:
            self.st = np.log( xt / self.full_mean)
            try:
                if self.fit_in_logspace:
                    self.popt, self.pcov = curve_fit(self._logf, xdata, np.log10(ydata),
                                                     *args, **kwargs)
                else:
                    self.popt, self.pcov = curve_fit(self._f, xdata, ydata,
                                                     *args, **kwargs)
                none_successful = False
            except Exception:
                # this candidate transition failed to converge; try the next
                continue
            y_fit = self._f(xdata, *self.popt)
            error = np.sum( (y_fit - ydata)**2 / ydata )
            if error < min_error:
                optimal_st = 1.0*self.st
                optimal_popt = copy(self.popt)
                optimal_pcov = copy(self.pcov)
                min_error = error
        if none_successful:
            # BUG FIX: mirror gaussian_powerlaw -- previously a total fit
            # failure left optimal_popt undefined and crashed below with an
            # opaque UnboundLocalError instead of a clear RuntimeError.
            self.popt = None ; self.pcov = None; self.st = None
            print("No fit found for the " + self.name)
            raise RuntimeError
        else:
            self.popt = optimal_popt
            self.pcov = optimal_pcov
            self.st = optimal_st
        return self.popt, self.pcov
class lognormal(general_functions):
    """Log-normal PDF fitter; mu and sigma are the mean and standard
    deviation of log(x).  If ``fix_mean`` is given, mu is pinned to that
    value during evaluation."""

    def __init__(self, fix_mean = None):
        general_functions.__init__(self, 'lognormal')
        self._mu = fix_mean  # optional fixed mean of log(x)
        self.fit_in_logspace = True
        return

    def _CDF(self, x, mu, sigma):
        """Analytic log-normal CDF."""
        if not (self._mu is None):
            mu = self._mu
        CDF = 0.5 * (1.0 + erf( (np.log(x) - mu)/(sigma*np.sqrt(2.0))))
        return CDF

    def _f(self, x, mu, sigma):
        """
        Actual Function
        """
        if not (self._mu is None):
            # BUG FIX: was ``self.mu`` (the attribute is ``_mu``), which
            # raised AttributeError whenever a fixed mean was supplied.
            mu = self._mu
        fx = (1.0 / (x * sigma * np.sqrt(2.0*np.pi)))
        fx *= np.exp( -1.0 * (np.log(x) - mu)**2 / (2.0 * sigma * sigma))
        return fx

    def _logf(self, x, *args):
        return np.log10( self._f(x, *args))

    def print_parameters(self):
        # typo fixed: "deviaion" -> "deviation" (matches gaussian's message)
        print("mu (mean of logged data) and sigma (standard deviation of logged data)")
# Registry mapping human-readable distribution names to their fitter
# classes.  Note the hyphenated 'log-normal' key maps to `lognormal`,
# while the compound models use underscore keys.
by_name = {'log-normal' : lognormal,
           'powerlaw' : power_law,
           'truncated_powerlaw' : truncated_powerlaw,
           'gaussian' : gaussian,
           'lognormal_powerlaw' : lognormal_powerlaw,
           'gaussian_powerlaw' : gaussian_powerlaw}
| aemerick/galaxy_analysis | utilities/functions.py | Python | mit | 11,888 | [
"Gaussian"
] | 3a7ce3399e60c5a7616136375aad131479af1c20b9697d70f983463276ce6e17 |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
try:
# for Python2
from Tkinter import *
except ImportError:
# for Python3
from tkinter import *
import time
import random
# Canvas dimensions in pixels; with 10x10 px cells this yields a 40x40
# board (square, which the App class implicitly relies on).
CANVAS_WIDTH = 400
CANVAS_HEIGHT = 400
CELL_WIDTH = 10
CELL_HEIGHT = 10
# Used to locate the x coordinate of neighbors (the 8 Moore neighbors).
NEIGHBORS_X = [0, 0, 1, -1, -1, 1, -1, 1]
# Used to locate the y coordinate of neighbors.
NEIGHBORS_Y = [1, -1, 0, 0, -1, -1, 1, 1]
class App():
    """
    Conway's game of life.

    Builds a Tk canvas of CELL_WIDTH x CELL_HEIGHT rectangles, seeds each
    cell randomly, then re-runs the Game of Life rules every 100 ms via
    Tk's `after` scheduler.  Constructing the object blocks in mainloop().
    """
    def __init__(self):
        self.root = Tk()
        self.c = Canvas(self.root,width=400, height=400)
        self.c.pack()
        rows = int(CANVAS_HEIGHT / CELL_HEIGHT)
        cols = int(CANVAS_WIDTH / CELL_WIDTH)
        # grid_cells1 holds the current generation, grid_cells2 the next.
        # NOTE(review): these are built cols x rows but indexed [row][col]
        # everywhere below -- only safe because the canvas is square
        # (rows == cols == 40).
        self.grid_cells1 = [[0 for x in range(rows)] for x in range(cols)]
        self.grid_cells2 = [[0 for x in range(rows)] for x in range(cols)]
        self.grid_rects = [[None for j in range(cols)] for i in range(rows)]
        for row in range(0,rows):
            for col in range(0,cols):
                x = col * CELL_WIDTH
                y = row * CELL_HEIGHT
                # Random initial state: roughly half the cells start alive.
                b = bool(random.getrandbits(1))
                self.grid_cells1[row][col] = b
                if b:
                    r = self.c.create_rectangle(x, y, x+CELL_WIDTH,y+CELL_HEIGHT, outline="black", fill="black")
                else:
                    r = self.c.create_rectangle(x, y, x+CELL_WIDTH,y+CELL_HEIGHT, outline="black", fill="white")
                self.grid_rects[row][col] = r
        self.update_clock()
        self.root.mainloop()
    def update_clock(self):
        # One Game of Life generation: compute the next state into
        # grid_cells2, then repaint and copy it back into grid_cells1.
        rows = int(CANVAS_HEIGHT / CELL_HEIGHT)
        cols = int(CANVAS_WIDTH / CELL_WIDTH)
        for row in range(0,rows):
            for col in range(0,cols):
                # Count live Moore neighbors (edges are not wrapped).
                total = 0
                for i in range(0,len(NEIGHBORS_X)):
                    n_col = col + NEIGHBORS_X[i]
                    n_row = row + NEIGHBORS_Y[i]
                    if n_col >= 0 and n_col < cols:
                        if n_row >= 0 and n_row < rows:
                            if self.grid_cells1[n_row][n_col]:
                                total=total+1
                alive = self.grid_cells1[row][col]
                if alive:
                    # 1. Any live cell with fewer than two live neighbors dies, as if caused by under-population.
                    if total < 2:
                        alive = False
                    # 2. Any live cell with two or three live neighbors lives on to the next generation. (not needed)
                    # 3. Any live cell with more than three live neighbors dies, as if by overcrowding.
                    if alive and total > 3:
                        alive = False
                else:
                    # 4. Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
                    if total == 3:
                        alive = True
                self.grid_cells2[row][col] = alive
        for row in range(0,rows):
            for col in range(0,cols):
                r = self.grid_rects[row][col]
                if self.grid_cells2[row][col]:
                    self.c.itemconfig(r, fill="black")
                else:
                    self.c.itemconfig(r, fill="white")
                self.grid_cells1[row][col] = self.grid_cells2[row][col]
        # Reschedule ourselves: one generation every 100 ms.
        self.root.after(100, self.update_clock)
if __name__ == '__main__':
    # Instantiate the GUI only when run as a script, so importing this
    # module (e.g. for reuse or testing) does not open a Tk window and
    # block in mainloop().
    app = App()
"VisIt"
] | d78de31c0e252482a0273806037dfc9d538248c097c7638469ed2f5869c27cfe |
####################################################################
# Rdesigneur example 3.2
# Making a myelinated axon with a propagating action potential.
####################################################################
import numpy as np
import moose
import pylab
import rdesigneur as rd
numAxonSegments = 405
nodeSpacing = 100
comptLen = 10e-6
comptDia = 2e-6 # 2x usual
RM = 100.0 # 10x usual
RA = 5.0
CM = 0.001 # 0.1x usual
nodeDia = 1e-6
nodeRM = 1.0
nodeCM = 0.01
def makeAxonProto():
axon = moose.Neuron( '/library/axon' )
x = 0.0
y = 0.0
prev = rd.buildCompt( axon, 'soma', RM = RM, RA = RA, CM = CM, dia = 10e-6, x=0, dx=comptLen)
theta = 0
x = comptLen
for i in range( numAxonSegments ):
r = comptLen
dx = comptLen * np.cos( theta )
dy = comptLen * np.sin( theta )
r = np.sqrt( x * x + y * y )
theta += comptLen / r
if i % nodeSpacing == 0:
compt = rd.buildCompt( axon, 'axon' + str(i), RM = nodeRM, RA = RA, CM = nodeCM, x = x, y = y, dx = dx, dy = dy, dia = nodeDia )
else:
compt = rd.buildCompt( axon, 'axon' + str(i), RM = RM, RA = RA, CM = CM, x = x, y = y, dx = dx, dy = dy, dia = comptDia )
moose.connect( prev, 'axial', compt, 'raxial' )
prev = compt
x += dx
y += dy
return axon
moose.Neutral( '/library' )
makeAxonProto()
rdes = rd.rdesigneur(
chanProto = [['make_HH_Na()', 'Na'], ['make_HH_K()', 'K']],
cellProto = [['elec','axon']],
chanDistrib = [
['Na', '#', 'Gbar', '12000 * (dia < 1.5e-6)' ],
['K', '#', 'Gbar', '3600 * (dia < 1.5e-6)' ]],
stimList = [['soma', '1', '.', 'inject', '(t>0.01 && t<0.2) * 1e-10' ]],
plotList = [['soma,axon100,axon200,axon300,axon400', '1', '.', 'Vm', 'Membrane potential']],
moogList = [['#', '1', '.', 'Vm', 'Vm (mV)']]
)
rdes.buildModel()
moose.reinit()
rdes.displayMoogli( 0.00005, 0.05, 0.0 )
| BhallaLab/moose-examples | tutorials/Rdesigneur/ex3.4_myelinated_axon.py | Python | gpl-2.0 | 1,957 | [
"MOOSE",
"NEURON"
] | c9b395a6b97e7d859e1a46445c4b21c720b33d3ed35ab08dfd573a9f920a43f4 |
import cv2
import numpy as np
import math
#import requests
#import json
import time
class Neuron(object):
    """A single ReLU neuron whose weights are parsed from one
    space-separated line of text."""

    def __init__(self, weights):
        # Strip newlines, then parse each space-separated token as a float.
        tokens = weights.replace('\n', '').split(' ')
        self.weights = [float(tok) for tok in tokens]

    def forward(self, x):
        """Return max(0, w . x) -- a rectified dot product of the
        weights with the input vector."""
        if len(x) != len(self.weights):
            raise Exception('neuron weights with input does not match')
        total = 0.0
        for w, xi in zip(self.weights, x):
            total += w * xi
        return max(0.0, total)
class Network(object):
    """A single-layer network loaded from a weights file: one Neuron per
    line.  ``move`` returns the index of the most activated neuron."""

    def __init__(self, filename):
        self.neurons = []
        with open(filename, 'r') as f:
            for line in f.readlines():
                self.neurons.append(Neuron(line))

    def move(self, distances):
        """Pick an action index from sensor readings.

        Appends a constant bias input of 1.0 (note: mutates the caller's
        list), evaluates exp(forward) for every neuron, and returns the
        index of the first neuron with the strictly largest activation.
        """
        distances.append(1.0)
        best_value = 0.0
        best_index = 0
        for idx, neuron in enumerate(self.neurons):
            activation = math.exp(neuron.forward(distances))
            if activation > best_value:
                best_value = activation
                best_index = idx
        return best_index
def main():
    """Drive the robot: read sensor distances, feed them through the
    network, and steer left/right/forward from the winning index."""
    from robot import Robot
    network = Network('data.weights')
    robot = Robot()
    while 'pigs' != 'fly':
        distances = robot.distance()
        # Skip the step when every sensor reads (nearly) clear.
        if distances[0] > 0.95 and distances[1] > 0.95 and distances[2] > 0.95:
            continue
        # BUG FIX: ``print x`` statements were Python-2-only syntax; the
        # parenthesized form works under both Python 2 and 3.
        print(distances)
        move = network.move(distances)
        print(move)
        # BUG FIX: ``move is 0`` compared identity with int literals,
        # which only worked by accident (CPython small-int caching);
        # use value equality instead.
        if move == 0:
            robot.left()
        elif move == 1:
            robot.right()
        else:
            robot.forward()
    robot.close()
if __name__ == '__main__':
main()
| kazepilot/backbone | network.py | Python | mit | 1,683 | [
"NEURON"
] | 1acf4e759bf89fad14ff0e338e057ed78a6497a26081234da1512f8a53c074ef |
#!/usr/bin/python3-utf8
# -*- coding: utf-8 -*-
import configparser
import datetime
import json
import os
import re
import sqlite3
import sys
import time
import urllib.parse
from classes.template_engine import TemplateEngine
from classes.galaxy_db import GalaxyDB
from classes.xnova_utils import PageDownloader, XNGalaxyParser, xnova_authorize
def debugprint(obj=None):
    """Emit a plain-text dump of the parsed request globals (and
    optionally *obj*) with a minimal CGI header, then stop execution.

    Intended for ad-hoc debugging of the CGI entry point.
    """
    print('Content-Type: text/plain; charset=utf-8')
    print()
    state = [('MODE', MODE),
             ('QUERY_STRING', QUERY_STRING),
             ('QUERY_PARAMS', str(QUERY_PARAMS)),
             ('AJAX_ACTION', AJAX_ACTION)]
    for label, value in state:
        print('{0}={1}'.format(label, value))
    if obj is not None:
        print(str(obj))
    exit()
def output_as_json(obj):
    """Serialize *obj* as JSON to stdout with the proper CGI
    content-type header."""
    print('Content-Type: application/json; charset=utf-8')
    print()
    body = json.dumps(obj)
    print(body)
def xn_res_str(n: int) -> str:
    """Format a resource amount compactly: whole thousands as 'K',
    millions as 'M' with one rounded decimal digit when present
    (e.g. 1340000 -> '1.3M'); None and sub-thousand values become '0'."""
    if n is None:
        return '0'
    millions = n // 1000000
    remainder = n - millions * 1000000
    if millions == 0:
        thousands = remainder // 1000
        return str(thousands) + 'K' if thousands > 0 else '0'
    tenths = round(remainder / 100000)
    if tenths > 0:
        return '{0}.{1}M'.format(millions, tenths)
    return str(millions) + 'M'
# --- CGI request parsing (module-level side effects) ---
# Parse QUERY_STRING from the environment into module globals that the
# handlers below consult: MODE selects the top-level dispatch branch,
# AJAX_ACTION / GMAP_* carry the branch-specific sub-parameters.
QUERY_STRING = ''
QUERY_PARAMS = dict()
if 'QUERY_STRING' in os.environ:
    QUERY_STRING = os.environ['QUERY_STRING']
MODE = ''
AJAX_ACTION = ''
GMAP_MODE = ''
GMAP_OBJECTS = ''
GMAP_NAME = ''
if QUERY_STRING != '':
    # parse_qs maps each parameter name to a LIST of values
    QUERY_PARAMS = urllib.parse.parse_qs(QUERY_STRING)
    if 'ajax' in QUERY_PARAMS:
        MODE = 'ajax'
        if len(QUERY_PARAMS['ajax']) > 0:
            AJAX_ACTION = QUERY_PARAMS['ajax'][0]
    if 'galaxymap' in QUERY_PARAMS:
        # NOTE: 'galaxymap' overrides 'ajax' when both are present
        MODE = 'galaxymap'
        if len(QUERY_PARAMS['galaxymap']) > 0:
            GMAP_MODE = QUERY_PARAMS['galaxymap'][0]
        if 'objects' in QUERY_PARAMS:
            if len(QUERY_PARAMS['objects']) > 0:
                GMAP_OBJECTS = QUERY_PARAMS['objects'][0]
        if 'name' in QUERY_PARAMS:
            if len(QUERY_PARAMS['name']) > 0:
                GMAP_NAME = QUERY_PARAMS['name'][0]
def req_param(name, def_val=None):
    """Return the first value of CGI parameter *name*, or *def_val* when absent."""
    if not QUERY_PARAMS:
        return def_val
    values = QUERY_PARAMS.get(name)
    if values:
        return values[0]
    return def_val
def fit_in_range(v: int, lower_range: int, upper_range: int) -> int:
    """Clamp *v* into [lower_range, upper_range] (lower bound applied first)."""
    return min(max(v, lower_range), upper_range)
def get_file_mtime_utc(fn: str) -> datetime.datetime:
    """Return the mtime of *fn* as a timezone-aware UTC datetime, or None if missing."""
    try:
        mtime = os.stat(fn).st_mtime
    except FileNotFoundError:
        return None
    # timezone-aware construction: attach UTC explicitly
    return datetime.datetime.fromtimestamp(mtime, tz=datetime.timezone.utc)
def get_file_mtime_utc_for_template(fn: str) -> str:
    """Human-readable UTC mtime of *fn* for templates; 'never' when the file is absent."""
    dt = get_file_mtime_utc(fn)
    return 'never' if dt is None else dt.strftime('%Y-%m-%d %H:%M:%S UTC')
def get_file_mtime_msk_for_template(fn: str) -> str:
    """Human-readable MSK (UTC+3) mtime of *fn* for templates; '' when absent."""
    dt = get_file_mtime_utc(fn)  # TZ-aware UTC datetime or None
    if dt is None:
        return ''
    # convert to fixed-offset Moscow time (UTC+3, no DST)
    tz_msk = datetime.timezone(datetime.timedelta(hours=3))
    return dt.astimezone(tz=tz_msk).strftime('%Y-%m-%d %H:%M:%S MSK')
if AJAX_ACTION == 'grid':
    # Grid search backend.
    # player/alliance searches:
    #   GET /xnova/index.py?ajax=grid&query=minlexx&category=player
    #   GET /xnova/index.py?ajax=grid&query=minlexx&category=player&sort=user_name&order=desc
    # inactives searches:
    #   GET /xnova/index.py?ajax=grid&category=inactives&user_flags=iIGU&gals=12345&s_min=1&s_max=499&min_rank=0
    ret = None
    # parse request
    val = req_param('query')
    cat = req_param('category')
    s_col = req_param('sort')     # may be None
    s_order = req_param('order')  # may be None
    user_flags = req_param('user_flags')
    gals = req_param('gals', '12345')
    s_min = req_param('s_min', '1')
    s_max = req_param('s_max', '499')
    min_rank = req_param('min_rank', '0')
    if (val is not None) and (cat is not None):
        gdb = GalaxyDB()
        val += '%'  # ... WHERE user_name LIKE 'value%'
        if cat == 'player':
            ret = gdb.query_like('user_name', val, s_col, s_order)
        elif cat == 'alliance':
            ret = gdb.query_like(['ally_name', 'ally_tag'], val, s_col, s_order)
    if cat is not None:
        if (cat == 'inactives') and (user_flags is not None):
            # - convert any char in gals to an integer
            # - clamp it into the valid galaxy range [1..5]
            # - do not add any duplicates to the list
            gal_ints = []  # resulting list
            for g in gals:
                g = GalaxyDB.safe_int(g)
                g = fit_in_range(g, 1, 5)
                if g not in gal_ints:
                    gal_ints.append(g)
            # convert systems range to ints,
            # make sure s_min <= s_max and both are in [1..499]
            s_min = GalaxyDB.safe_int(s_min)
            s_max = GalaxyDB.safe_int(s_max)
            if s_min > s_max:
                # BUGFIX: the swap previously assigned the literal 5 to s_min
                # instead of s_max, mangling the requested range.
                s_min, s_max = s_max, s_min
            s_min = fit_in_range(s_min, 1, 499)
            s_max = fit_in_range(s_max, 1, 499)
            min_rank = GalaxyDB.safe_int(min_rank)
            min_rank = fit_in_range(min_rank, 0, 1000000)
            # go!
            gdb = GalaxyDB()
            ret = gdb.query_inactives(user_flags, gal_ints, s_min, s_max, min_rank, s_col, s_order)
    # normalize the response shape: always emit 'rows' and 'total'
    if ret is None:
        ret = dict()
    if 'rows' not in ret:
        ret['rows'] = []
    ret['total'] = len(ret['rows'])
    # extra debug data
    ret['QUERY_STRING'] = QUERY_STRING
    output_as_json(ret)
    exit()
if AJAX_ACTION == 'lastactive':
    # For every planet of the named player, fetch its galaxy page from the live
    # game site and report the planet's "last active" timestamp.
    ret = dict()
    ret['rows'] = []
    ret['total'] = 0
    #
    player_name = req_param('query')
    if (player_name is not None) and (player_name != ''):
        gdb = GalaxyDB()
        planets_info = gdb.query_player_planets(player_name)
        # ret['planets_info'] = planets_info  # checked - this is OK
        # list of dicts [{'g': 1, 's': 23, 'p': 9, ...}, {...}, {...}, ...]
        if len(planets_info) > 0:
            # Authorization strategy history:
            # 1. cookies_dict = xnova_authorize('uni5.xnova.su', 'login', 'password')
            # 2. hope this cookie will live long enough
            # cookies_dict = {
            #     'u5_id': '87',
            #     'u5_secret': 'c...7',
            #     'u5_full': 'N'
            # }
            # 3. read from config file (current approach)
            cookies_dict = {
                'u5_id': '0',
                'u5_secret': '',
                'u5_full': 'N'
            }
            cfg = configparser.ConfigParser()
            cfgs_read = cfg.read(['config.ini'])
            if 'config.ini' not in cfgs_read:
                ret['error'] = 'Failed to load xnova auth cookies from config.ini'
                output_as_json(ret)
                exit()
            if 'lastactive' not in cfg.sections():
                ret['error'] = 'Cannot find [lastactive] section in config.ini'
                output_as_json(ret)
                exit()
            # log in with credentials from config; replaces the placeholder dict above
            cookies_dict = xnova_authorize('uni5.xnova.su',
                                           cfg['lastactive']['xn_login'],
                                           cfg['lastactive']['xn_password'])
            if cookies_dict is None:
                ret['error'] = 'Failed to authorize to xnova site!'
                output_as_json(ret)
                exit()
            #
            dnl = PageDownloader(cookies_dict=cookies_dict)
            gparser = XNGalaxyParser()
            cached_pages = dict()  # coords -> page_content; avoids re-downloading a system
            for pinfo in planets_info:
                # try to lookup page in a cache, with key 'galaxy,system'
                coords_str = str(pinfo['g']) + ',' + str(pinfo['s'])  # '1,23'
                if coords_str in cached_pages:
                    page_content = cached_pages[coords_str]
                else:
                    page_content = dnl.download_url_path('galaxy/{0}/{1}/'.format(
                        pinfo['g'], pinfo['s']), return_binary=False)
                # seems to work, for now...
                if page_content is None:
                    ret['error'] = 'Failed to download, ' + dnl.error_str
                    ret['rows'] = []
                    break
                else:
                    cached_pages[coords_str] = page_content  # save to cache
                # ret['page'] = page_content  # checked, galaxy page loaded OK
                # now need to parse it
                gparser.clear()
                gparser.parse_page_content(page_content)
                galaxy_rows = gparser.unscramble_galaxy_script()
                if galaxy_rows is None:
                    ret['error'] = 'Failed to parse galaxy page, ' + gparser.error_str
                    ret['rows'] = []
                    break
                else:
                    # parse OK: find the row matching this planet's position
                    for planet_row in galaxy_rows:
                        if planet_row is not None:
                            planet_pos = GalaxyDB.safe_int(planet_row['planet'])
                            if planet_pos == pinfo['p']:
                                ret_row = dict()
                                ret_row['planet_name'] = planet_row['name']
                                ret_row['luna_name'] = ''
                                if planet_row['luna_name'] is not None:
                                    ret_row['luna_name'] = planet_row['luna_name']
                                ret_row['coords_link'] = '<a href="http://uni5.xnova.su/galaxy/{0}/{1}/">' \
                                    '[{0}:{1}:{2}]</a>'.format(pinfo['g'], pinfo['s'], pinfo['p'])
                                ret_row['lastactive'] = planet_row['last_active']
                                ret['rows'].append(ret_row)
                # recalculate total rows count (kept up to date every iteration)
                ret['total'] = len(ret['rows'])
                pass
    #
    output_as_json(ret)
    exit()
if AJAX_ACTION == 'lastlogs':
    # Battle-log listing backend.
    #   /xnova/index.py?ajax=lastlogs
    #   /xnova/index.py?ajax=lastlogs&value=24&category=hours&nick=Nickname
    # category may be 'days'
    cat = req_param('category', 'hours')  # default - hours
    val = int(req_param('value', 24))     # default - 24 hours
    nick = req_param('nick', '')          # default - empty
    # compute the earliest timestamp still inside the requested window
    tm_now = int(time.time())
    requested_time_interval_hrs = val
    if cat == 'days':
        requested_time_interval_hrs = 24 * val  # specified number of days
    requested_time_interval_secs = requested_time_interval_hrs * 3600
    min_time = tm_now - requested_time_interval_secs
    #
    ret = dict()
    ret['rows'] = []
    # Debug: :)
    # ret['min_time'] = min_time
    # ret['cur_time'] = tm_now
    # ret['requested_time_interval_hrs'] = requested_time_interval_hrs
    log_rows = []
    sqconn = sqlite3.connect('lastlogs5.db')
    cur = sqconn.cursor()
    #
    # check if table 'logs' exists
    cur.execute("SELECT COUNT(*) FROM sqlite_master WHERE name='logs' AND type='table'")
    rows = cur.fetchall()
    if (len(rows) != 1) or (rows[0][0] != 1):
        ret['rows'] = []
        ret['total'] = 0
        ret['msg'] = 'table not found: logs'
        output_as_json(ret)
        exit()
    #
    # BUGFIX: the adjacent string fragments used to concatenate without
    # whitespace ("... LIKE ?))ORDER BY" / "log_time >= ?ORDER BY"), which only
    # parsed by accident of the SQLite tokenizer; keep explicit spaces.
    if nick != '':
        q = 'SELECT log_id, log_time, attacker, defender, attacker_coords, defender_coords, ' \
            ' total_loss, po_me, po_cry, win_me, win_cry, win_deit ' \
            'FROM logs ' \
            'WHERE (log_time >= ?) AND ((attacker LIKE ?) OR (defender LIKE ?)) ' \
            'ORDER BY log_time DESC'
        cur.execute(q, (min_time, nick+'%', nick+'%'))
    else:
        q = 'SELECT log_id, log_time, attacker, defender, attacker_coords, defender_coords, ' \
            ' total_loss, po_me, po_cry, win_me, win_cry, win_deit ' \
            'FROM logs ' \
            'WHERE log_time >= ? ' \
            'ORDER BY log_time DESC'
        cur.execute(q, (min_time, ))
    for row in cur.fetchall():
        # build galaxy links from the "[g:s:p]" coordinate strings
        att_c = str(row[4])
        def_c = str(row[5])
        att_c_link = ''
        def_c_link = ''
        m = re.search(r'\[(\d+):(\d+):(\d+)\]', att_c)
        if m is not None:
            coord_g = int(m.group(1))
            coord_s = int(m.group(2))
            att_c_link = 'http://uni5.xnova.su/galaxy/{0}/{1}/'.format(coord_g, coord_s)
        m = re.search(r'\[(\d+):(\d+):(\d+)\]', def_c)
        if m is not None:
            coord_g = int(m.group(1))
            coord_s = int(m.group(2))
            def_c_link = 'http://uni5.xnova.su/galaxy/{0}/{1}/'.format(coord_g, coord_s)
        lrow = dict()
        lrow['log_id'] = '<a href="http://uni5.xnova.su/log/' + str(row[0]) + '/" target="_blank">#' \
                         + str(row[0]) + '</a>'
        lrow['log_time'] = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(int(row[1])))
        lrow['attacker'] = str(row[2]) + ' <a href="' + att_c_link + '" target="_blank">' + str(row[4]) + '</a>'
        lrow['defender'] = str(row[3]) + ' <a href="' + def_c_link + '" target="_blank">' + str(row[5]) + '</a>'
        lrow['total_loss'] = xn_res_str(row[6])
        lrow['po'] = xn_res_str(row[7]) + ' me, ' + xn_res_str(row[8]) + ' cry'
        lrow['win'] = xn_res_str(row[9]) + ' me, ' + xn_res_str(row[10]) + ' cry, ' + xn_res_str(row[11]) + ' deit'
        log_rows.append(lrow)
    ret['rows'] = log_rows
    ret['total'] = len(log_rows)
    output_as_json(ret)
    exit()
if AJAX_ACTION == 'gmap_population':
    # Planet counts for every (galaxy, system) pair, as one flat JSON list.
    gdb = GalaxyDB()
    population_data = []
    # BUGFIX: range(1, 5) silently skipped galaxy 5 (and the old comment
    # "[0..4]" was wrong for it too); galaxies run [1..5], matching the
    # fit_in_range(g, 1, 5) clamp used by the grid handler above.
    for g in range(1, 6):        # galaxies [1..5]
        for s in range(1, 500):  # systems [1..499]
            population_data.append(gdb.query_planets_count(g, s))
    output_as_json(population_data)
    exit()
if MODE == 'galaxymap':
    # Render the galaxy map as a PNG CGI response using the PIL helpers.
    from classes.img_gen_pil import generate_background, get_image_bytes, draw_galaxy_grid, \
        draw_population, draw_moons, draw_player_planets, draw_alliance_planets

    def output_img(img):
        # Emit CGI headers, then the raw PNG bytes through the underlying fd
        # (binary data must bypass the text-mode sys.stdout wrapper).
        img_bytes = get_image_bytes(img, 'PNG')
        print('Content-Type: image/png')
        print('')
        sys.stdout.flush()
        os.write(sys.stdout.fileno(), img_bytes)
        sys.stdout.flush()

    grid_color = (128, 128, 255, 255)
    img = generate_background()
    # NOTE(review): draw_population() is called unconditionally here AND again
    # in the 'population' branch below, so that layer is drawn twice in
    # population mode — confirm whether this first call is a leftover.
    draw_population(img)
    if GMAP_MODE == 'population':
        draw_population(img)
    elif GMAP_MODE == 'moons':
        draw_moons(img)
    elif GMAP_MODE == 'player':
        only_moons = False
        if GMAP_OBJECTS == 'moons':
            only_moons = True
        draw_player_planets(img, GMAP_NAME, only_moons)
    elif GMAP_MODE == 'alliance':
        only_moons = False
        if GMAP_OBJECTS == 'moons':
            only_moons = True
        draw_alliance_planets(img, GMAP_NAME, only_moons)
    # finally, common output
    draw_galaxy_grid(img, color=grid_color)
    output_img(img)
    exit()
# Default (no recognized handler matched): render the main HTML page,
# showing when each backing database was last refreshed.
template = TemplateEngine({
    'TEMPLATE_DIR': './html',
    'TEMPLATE_CACHE_DIR': './cache'})
template.assign('galaxy_mtime', get_file_mtime_msk_for_template('galaxy5.db'))
template.assign('lastlogs_mtime', get_file_mtime_msk_for_template('lastlogs5.db'))
template.output('index.html')
| minlexx/xnova_galaxy_parser | site_uni5/index.py | Python | gpl-3.0 | 15,457 | [
"Galaxy"
] | 26fbcbf49e1403183ab1723e7ed30d5bc87058435f8585929716be60b064d5c5 |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Map the residues of two structures to each other based on a FASTA alignment
file.
"""
from __future__ import print_function
from Bio.Data import SCOPData
from Bio.PDB import Selection
from Bio.PDB.Polypeptide import is_aa
class StructureAlignment(object):
    """
    This class aligns two structures based on an alignment of their
    sequences.
    """
    def __init__(self, fasta_align, m1, m2, si=0, sj=1):
        """
        Attributes:
         - fasta_align --- Alignment object
         - m1, m2 --- two models
         - si, sj --- the sequences in the Alignment object that
           correspond to the structures
        """
        l = fasta_align.get_alignment_length()
        # Get the residues in the models
        rl1 = Selection.unfold_entities(m1, 'R')
        rl2 = Selection.unfold_entities(m2, 'R')
        # Residue positions: cursors into rl1/rl2, advanced as columns are consumed
        p1 = 0
        p2 = 0
        # Map equivalent residues to each other
        map12 = {}
        map21 = {}
        # List of residue pairs (None if -)
        duos = []
        for i in range(0, l):
            column = fasta_align.get_column(i)
            aa1 = column[si]
            aa2 = column[sj]
            if aa1 != "-":
                # Position in seq1 is not -
                while True:
                    # Loop until an aa is found (skip waters/hetero residues)
                    r1 = rl1[p1]
                    p1 = p1 + 1
                    if is_aa(r1):
                        break
                # sanity check: structure residue must match the sequence letter
                self._test_equivalence(r1, aa1)
            else:
                r1 = None
            if aa2 != "-":
                # Position in seq2 is not -
                while True:
                    # Loop until an aa is found
                    r2 = rl2[p2]
                    p2 = p2 + 1
                    if is_aa(r2):
                        break
                self._test_equivalence(r2, aa2)
            else:
                r2 = None
            if r1:
                # Map residue in seq1 to its equivalent in seq2
                map12[r1] = r2
            if r2:
                # Map residue in seq2 to its equivalent in seq1
                map21[r2] = r1
            # Append aligned pair (r is None if gap)
            duos.append((r1, r2))
        self.map12 = map12
        self.map21 = map21
        self.duos = duos

    def _test_equivalence(self, r1, aa1):
        "Test if aa in sequence fits aa in structure."
        resname = r1.get_resname()
        resname = SCOPData.protein_letters_3to1[resname]
        assert(aa1 == resname)

    def get_maps(self):
        """
        Return two dictionaries that map a residue in one structure to
        the equivalent residue in the other structure.
        """
        return self.map12, self.map21

    def get_iterator(self):
        """
        Iterator over all residue pairs.
        """
        for i in range(0, len(self.duos)):
            yield self.duos[i]
if __name__ == "__main__":
    # Command-line demo: align two PDB structures via a two-sequence FASTA
    # alignment and print the aligned residue pairs.
    import sys
    from Bio.Alphabet import generic_protein
    from Bio import AlignIO
    from Bio.PDB import PDBParser

    if len(sys.argv) != 4:
        print("Expects three arguments,")
        print(" - FASTA alignment filename (expect two sequences)")
        print(" - PDB file one")
        print(" - PDB file two")
        sys.exit()

    # The alignment
    fa = AlignIO.read(open(sys.argv[1]), "fasta", generic_protein)

    pdb_file1 = sys.argv[2]
    pdb_file2 = sys.argv[3]

    # The structures
    p = PDBParser()
    s1 = p.get_structure('1', pdb_file1)
    p = PDBParser()
    s2 = p.get_structure('2', pdb_file2)

    # Get the models (first model of each structure)
    m1 = s1[0]
    m2 = s2[0]

    al = StructureAlignment(fa, m1, m2)

    # Print aligned pairs (r is None if gap)
    for (r1, r2) in al.get_iterator():
        print("%s %s" % (r1, r2))
| zjuchenyuan/BioWeb | Lib/Bio/PDB/StructureAlignment.py | Python | mit | 4,024 | [
"Biopython"
] | eb17bdb7cd12f8deceb362ce90ce10e4c15ef4bce66c3b9bb0835ce33a4c216a |
import os
import time
from collections import OrderedDict
from io import BytesIO
from translate.storage import po, poheader, poxliff
def test_parseheaderstring():
    """test for the header parsing function"""
    source = r"""item1: one
item2: two:two
this item must get ignored because there is no colon sign in it
item3: three
"""
    parsed = poheader.parseheaderstring(source)
    print(type(parsed))
    assert len(parsed) == 3
    assert parsed["item1"] == "one"
    assert parsed["item2"] == "two:two"
    assert parsed["item3"] == "three"
def test_update():
    """test the update function"""
    # do we really add nothing if add==False ?
    d = poheader.update({}, test="hello")
    assert len(d) == 0
    # do we add if add==True ?
    d = poheader.update({}, add=True, Test="hello")
    assert len(d) == 1
    assert d["Test"] == "hello"
    # do we really update ?
    d = poheader.update({"Test": "hello"}, add=True, Test="World")
    assert len(d) == 1
    assert d["Test"] == "World"
    # does key rewrite work ? (underscores become dashes, words capitalized)
    d = poheader.update({}, add=True, test_me="hello")
    assert d["Test-Me"] == "hello"
    # is the order correct ? (known header keys keep their canonical order)
    d = OrderedDict()
    d["Project-Id-Version"] = "abc"
    d["POT-Creation-Date"] = "now"
    d = poheader.update(d, add=True, Test="hello", Report_Msgid_Bugs_To="bugs@list.org")
    assert list(d.keys()) == [
        "Project-Id-Version",
        "Report-Msgid-Bugs-To",
        "POT-Creation-Date",
        "Test",
    ]
def poparse(posource):
    """helper that parses po source without requiring files"""
    return po.pofile(BytesIO(posource.encode()))
def poxliffparse(posource):
    """helper that parses po source into poxliffFile"""
    result = poxliff.PoXliffFile()
    result.parse(posource)
    return result
def check_po_date(datestring):
    """Check the validity of a PO date.

    The datestring must be in the format: 2007-06-08 10:08+0200
    """
    # time.strptime() does not recognize %z (its use is deprecated anyway),
    # so the numeric timezone offset is validated separately.
    offset = datestring[-4:]
    assert type(int(offset)) == int
    # Strip the timezone suffix (e.g. "+0200") so the remainder matches the
    # explicit date format, then check it actually parses.
    stamp = datestring[0:-5]
    assert type(time.strptime(stamp, "%Y-%m-%d %H:%M")) == time.struct_time
def test_po_dates():
    """Generated PO header date fields must be valid PO-format dates."""
    pofile = po.pofile()
    headerdict = pofile.makeheaderdict(po_revision_date=True)
    check_po_date(headerdict["POT-Creation-Date"])
    check_po_date(headerdict["PO-Revision-Date"])

    # explicit struct_time values must be formatted correctly too
    headerdict = pofile.makeheaderdict(
        pot_creation_date=time.localtime(), po_revision_date=time.localtime()
    )
    check_po_date(headerdict["POT-Creation-Date"])
    check_po_date(headerdict["PO-Revision-Date"])
def test_timezones():
    """poheader.tzstring() must agree with strftime('%z') across timezones."""
    # The following will only work on Unix because of tzset() and %z
    if "tzset" in time.__dict__:
        os.environ["TZ"] = "Asia/Kabul"
        time.tzset()
        assert time.timezone == -16200
        # Typically "+0430"
        assert poheader.tzstring() == time.strftime("%z")

        os.environ["TZ"] = "Asia/Seoul"
        time.tzset()
        assert time.timezone == -32400
        # Typically "+0900"
        assert poheader.tzstring() == time.strftime("%z")

        os.environ["TZ"] = "Africa/Johannesburg"
        time.tzset()
        assert time.timezone == -7200
        # Typically "+0200"
        assert poheader.tzstring() == time.strftime("%z")

        os.environ["TZ"] = "UTC"
        time.tzset()
        assert time.timezone == 0
        # Typically "+0000"
        assert poheader.tzstring() == time.strftime("%z")
def test_header_blank():
    """test header functionality"""
    def compare(pofile):
        # Verify that the parsed file consists of exactly one header unit
        # carrying all the standard blank-template header fields.
        print(pofile)
        assert len(pofile.units) == 1
        header = pofile.header()
        assert header.isheader()
        assert not header.isblank()

        headeritems = pofile.parseheader()
        assert headeritems["Project-Id-Version"] == "PACKAGE VERSION"
        assert headeritems["Report-Msgid-Bugs-To"] == ""
        check_po_date(headeritems["POT-Creation-Date"])
        assert headeritems["PO-Revision-Date"] == "YEAR-MO-DA HO:MI+ZONE"
        assert headeritems["Last-Translator"] == "FULL NAME <EMAIL@ADDRESS>"
        assert headeritems["Language-Team"] == "LANGUAGE <LL@li.org>"
        assert headeritems["MIME-Version"] == "1.0"
        assert headeritems["Content-Type"] == "text/plain; charset=UTF-8"
        assert headeritems["Content-Transfer-Encoding"] == "8bit"
        assert headeritems["Plural-Forms"] == "nplurals=INTEGER; plural=EXPRESSION;"

    posource = r"""# other comment\n
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2006-03-08 17:30+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n"
"""
    pofile = poparse(posource)
    compare(pofile)
    ## TODO: enable this code if PoXliffFile is able to parse a header
    ##
    ## poxliffsource = r'''<?xml version="1.0" encoding="utf-8"?>
    ##<xliff version="1.1" xmlns="urn:oasis:names:tc:xliff:document:1.1">
    ##
    ##<file datatype="po" original="test.po" source-language="en-US"><body><trans-unit approved="no" id="1" restype="x-gettext-domain-header" xml:space="preserve"><source>Project-Id-Version: PACKAGE VERSION
    ##Report-Msgid-Bugs-To:
    ##POT-Creation-Date: 2006-03-08 17:30+0200
    ##PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
    ##Last-Translator: FULL NAME <ph id="1"><EMAIL@ADDRESS></ph>
    ##Language-Team: LANGUAGE <ph id="2"><LL@li.org></ph>
    ##MIME-Version: 1.0
    ##Content-Type: text/plain; charset=UTF-8
    ##Content-Transfer-Encoding: 8bit
    ##Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;
    ##</source><target>Project-Id-Version: PACKAGE VERSION
    ##Report-Msgid-Bugs-To:
    ##POT-Creation-Date: 2006-03-08 17:30+0200
    ##PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
    ##Last-Translator: FULL NAME <ph id="1"><EMAIL@ADDRESS></ph>
    ##Language-Team: LANGUAGE <ph id="2"><LL@li.org></ph>
    ##MIME-Version: 1.0
    ##Content-Type: text/plain; charset=UTF-8
    ##Content-Transfer-Encoding: 8bit
    ##Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;
    ##</target><context-group name="po-entry" purpose="information"><context context-type="x-po-trancomment">other comment\n</context></context-group><note from="po-translator">other comment\n</note></trans-unit></body></file></xliff>
    ##'''
    ## pofile = poparse(poxliffsource)
    ## compare(pofile)
def test_plural_equation():
    """
    test that we work with the equation even is the last semicolon is left out, since gettext
    tools don't seem to mind
    """
    posource = r"""msgid ""
msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1)%s\n"
"""
    # parse once with and once without the trailing semicolon
    for colon in ("", ";"):
        pofile = poparse(posource % colon)
        print(pofile)
        assert len(pofile.units) == 1
        header = pofile.units[0]
        assert header.isheader()
        assert not header.isblank()

        pofile.parseheader()
        nplural, plural = pofile.getheaderplural()
        assert nplural == "2"
        assert plural == "(n != 1)"
    ## TODO: add the same test for PoXliffFile
def test_plural_equation_across_lines():
    """test that we work if the plural equation spans more than one line"""
    posource = r"""msgid ""
msgstr ""
"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%"
"10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
"""
    pofile = poparse(posource)
    print(pofile)
    assert len(pofile.units) == 1
    header = pofile.units[0]
    assert header.isheader()
    assert not header.isblank()

    pofile.parseheader()
    nplural, plural = pofile.getheaderplural()
    assert nplural == "3"
    # the wrapped equation must be rejoined into one expression
    assert (
        plural
        == "(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)"
    )
    ## TODO: add the same test for PoXliffFile
def test_updatecontributor():
    """Test that we can update contributor information in the header comments."""
    posource = r"""msgid ""
msgstr ""
"MIME-Version: 1.0"
"""
    pofile = poparse(posource)
    # name only -> "# Name, YEAR" comment
    pofile.updatecontributor("Grasvreter")
    assert "# Grasvreter, 20" in bytes(pofile).decode("utf-8")
    # name + email -> "# Name <email>, YEAR"
    pofile.updatecontributor("Koeivreter", "monster@grasveld.moe")
    assert "# Koeivreter <monster@grasveld.moe>, 20" in bytes(pofile).decode("utf-8")

    # an existing contributor line gets the current year appended, not duplicated
    pofile.header().addnote("Khaled Hosny <khaledhosny@domain.org>, 2006, 2007, 2008.")
    pofile.updatecontributor("Khaled Hosny", "khaledhosny@domain.org")
    print(bytes(pofile))
    assert (
        "# Khaled Hosny <khaledhosny@domain.org>, 2006, 2007, 2008, %s."
        % time.strftime("%Y")
        in bytes(pofile).decode("utf-8")
    )
def test_updatecontributor_header():
    """Test preserving empty lines in comments"""
    posource = r"""# Japanese translation of ibus.
# Copyright (C) 2015-2019 Takao Fujiwara <takao.fujiwara1@gmail.com>
# This file is distributed under the same license as the ibus package.
#
# Translators:
# Takao Fujiwara <takao.fujiwara1@gmail.com>, 2012
msgid ""
msgstr ""
"MIME-Version: 1.0"
"""
    pofile = poparse(posource)
    # Add contributor
    pofile.updatecontributor("Grasvreter")
    # Manually build expected output: the new line is appended at the end of
    # the comment block, with the rest of the header left untouched.
    expected = posource.replace(
        "msgid", "# Grasvreter, %s.\nmsgid" % time.strftime("%Y")
    )
    assert bytes(pofile).decode("utf-8") == expected
def test_language():
    """Test that we can get a language from the relevant headers."""
    posource = r"""msgid ""
msgstr ""
"MIME-Version: 1.0\n"
"""
    pofile = poparse(posource)
    assert pofile.gettargetlanguage() is None

    # language can be inferred from the Language-Team address...
    posource += '"Language-Team: translate-discuss-af@lists.sourceforge.net\\n"\n'
    pofile = poparse(posource)
    assert pofile.gettargetlanguage() == "af"

    # ...overridden by X-Poedit-Language...
    posource += '"X-Poedit-Language: German\\n"\n'
    pofile = poparse(posource)
    assert pofile.gettargetlanguage() == "de"

    # ...and the explicit Language header wins over everything
    posource += '"Language: fr_CA\\n"\n'
    pofile = poparse(posource)
    assert pofile.gettargetlanguage() == "fr_CA"
def test_project():
    """Test that we can get a project from the relevant headers."""
    posource = r"""msgid ""
msgstr ""
"MIME-Version: 1.0\n"
"""
    pofile = poparse(posource)
    assert pofile.getprojectstyle() is None

    # project style inferred from characteristic header values...
    posource += '"X-Accelerator-Marker: ~\\n"\n'
    pofile = poparse(posource)
    assert pofile.getprojectstyle() == "openoffice"

    posource += '"Report-Msgid-Bugs-To: http://bugzilla.gnome.org/enter_bug.cgi?product=system-\\n"\n'
    pofile = poparse(posource)
    assert pofile.getprojectstyle() == "gnome"

    # ...but an explicit X-Project-Style header takes precedence
    posource += '"X-Project-Style: drupal\\n"\n'
    pofile = poparse(posource)
    assert pofile.getprojectstyle() == "drupal"

    # setprojectstyle accepts arbitrary values
    pofile.setprojectstyle("kde")
    assert pofile.getprojectstyle() == "kde"
    pofile.setprojectstyle("complete-rubbish")
    assert pofile.getprojectstyle() == "complete-rubbish"
| miurahr/translate | translate/storage/test_poheader.py | Python | gpl-2.0 | 11,321 | [
"MOE"
] | e73e95024590a167243c2c181369b582bdc4193fa582185b4fa53bb6664fdcfa |
# -*- coding: utf-8 -*-
import os
import copy
import logging
import shutil
import time
import mathutils
import bpy
import bmesh
from collections import OrderedDict
from mmd_tools.core import pmx
from mmd_tools.core.bone import FnBone
from mmd_tools.core.material import FnMaterial
from mmd_tools.core.morph import FnMorph
from mmd_tools.core.sdef import FnSDEF
from mmd_tools.core.vmd.importer import BoneConverter, BoneConverterPoseMode
from mmd_tools import bpyutils
from mmd_tools.utils import saferelpath
from mmd_tools.bpyutils import matmul
from mmd_tools.operators.misc import MoveObject
class _Vertex:
    """Temporary per-vertex record accumulated during export before conversion to pmx.Vertex."""
    def __init__(self, co, groups, offsets, edge_scale, vertex_order, uv_offsets):
        self.co = co
        self.groups = groups  # [(group_number, weight), ...]
        self.offsets = offsets
        self.edge_scale = edge_scale
        self.vertex_order = vertex_order  # used for controlling vertex order
        self.uv_offsets = uv_offsets
        # The fields below are filled in later, while meshes are exported.
        self.index = None
        self.uv = None
        self.normal = None
        self.sdef_data = []  # (C, R0, R1)
        self.add_uvs = [None]*4  # UV1~UV4
class _Face:
    def __init__(self, vertices, index=-1):
        ''' Temporary Face Class: an ordered list of _Vertex plus an optional source index.
        '''
        self.vertices = vertices
        self.index = index
class _Mesh:
    """Temporary mesh record: faces grouped by material, plus name lookup tables."""
    def __init__(self, material_faces, shape_key_names, material_names):
        self.material_faces = material_faces  # dict of {material_index => [face1, face2, ....]}
        self.shape_key_names = shape_key_names
        self.material_names = material_names
class _DefaultMaterial:
    """Owns a throwaway Blender material used for faces without one; removed on destruction."""
    def __init__(self):
        mat = bpy.data.materials.new('')
        #mat.mmd_material.diffuse_color = (0, 0, 0)
        #mat.mmd_material.specular_color = (0, 0, 0)
        #mat.mmd_material.ambient_color = (0, 0, 0)
        self.material = mat
        logging.debug('create default material: %s', str(self.material))

    def __del__(self):
        if self.material:
            logging.debug('remove default material: %s', str(self.material))
            bpy.data.materials.remove(self.material)
class __PmxExporter:
CATEGORIES = {
'SYSTEM': pmx.Morph.CATEGORY_SYSTEM,
'EYEBROW': pmx.Morph.CATEGORY_EYEBROW,
'EYE': pmx.Morph.CATEGORY_EYE,
'MOUTH': pmx.Morph.CATEGORY_MOUTH,
}
    def __init__(self):
        self.__model = None                 # the pmx.Model being assembled
        self.__bone_name_table = []         # bone names in export order (presumably filled by __exportBones)
        self.__material_name_table = []     # Blender material names in export order
        self.__exported_vertices = []       # _Vertex objects, in output order
        self.__default_material = None      # lazily created _DefaultMaterial fallback
        self.__vertex_order_map = None      # used for controlling vertex order; {index: _Vertex} when enabled
        self.__disable_specular = False     # when True, sphere texture mode is forced OFF
        self.__add_uv_count = 0             # count of additional UV layers (UV1~UV4) -- usage not visible here
@staticmethod
def flipUV_V(uv):
u, v = uv
return u, 1.0-v
    def __getDefaultMaterial(self):
        # Lazily create (once) and return the shared fallback Blender material.
        if self.__default_material is None:
            self.__default_material = _DefaultMaterial()
        return self.__default_material.material
    def __sortVertices(self):
        """Reorder model.vertices by each _Vertex.vertex_order and remap all indices."""
        logging.info(' - Sorting vertices ...')
        weight_items = self.__vertex_order_map.items()
        # old vertex indices, sorted by the user-controlled vertex_order key
        sorted_indices = [i[0] for i in sorted(weight_items, key=lambda x: x[1].vertex_order)]
        vertices = self.__model.vertices
        self.__model.vertices = [vertices[i] for i in sorted_indices]
        # update indices: build {old_index: new_index} and rewrite both the
        # _Vertex records and every face's index triple in place
        index_map = {x:i for i, x in enumerate(sorted_indices)}
        for v in self.__vertex_order_map.values():
            v.index = index_map[v.index]
        for f in self.__model.faces:
            f[:] = [index_map[i] for i in f]
        logging.debug(' - Done (count:%d)', len(self.__vertex_order_map))
    def __exportMeshes(self, meshes, bone_map):
        """Convert collected _Mesh data into pmx vertices, faces and materials.

        Faces are grouped per material name across all meshes so each material
        is emitted once with its total face count.
        """
        mat_map = OrderedDict()
        for mesh in meshes:
            for index, mat_faces in sorted(mesh.material_faces.items(), key=lambda x: x[0]):
                name = mesh.material_names[index]
                if name not in mat_map:
                    mat_map[name] = []
                mat_map[name].append(mat_faces)

        sort_vertices = self.__vertex_order_map is not None
        if sort_vertices:
            self.__vertex_order_map.clear()

        # export vertices
        for mat_name, mat_meshes in mat_map.items():
            face_count = 0
            for mat_faces in mat_meshes:
                mesh_vertices = []
                for face in mat_faces:
                    mesh_vertices.extend(face.vertices)

                for v in mesh_vertices:
                    if v.index is not None:
                        continue  # shared vertex already emitted
                    v.index = len(self.__model.vertices)
                    if sort_vertices:
                        self.__vertex_order_map[v.index] = v

                    pv = pmx.Vertex()
                    pv.co = v.co
                    pv.normal = v.normal
                    pv.uv = self.flipUV_V(v.uv)
                    pv.edge_scale = v.edge_scale
                    for _uvzw in v.add_uvs:
                        if _uvzw:
                            pv.additional_uvs.append(self.flipUV_V(_uvzw[0])+self.flipUV_V(_uvzw[1]))

                    # Pick the PMX weight type from the number of deform groups:
                    # 0/1 -> BDEF1, 2 -> BDEF2 (or SDEF), else BDEF4.
                    t = len(v.groups)
                    if t == 0:
                        weight = pmx.BoneWeight()
                        weight.type = pmx.BoneWeight.BDEF1
                        weight.bones = [0]
                        pv.weight = weight
                    elif t == 1:
                        weight = pmx.BoneWeight()
                        weight.type = pmx.BoneWeight.BDEF1
                        weight.bones = [v.groups[0][0]]
                        pv.weight = weight
                    elif t == 2:
                        vg1, vg2 = v.groups
                        weight = pmx.BoneWeight()
                        weight.type = pmx.BoneWeight.BDEF2
                        weight.bones = [vg1[0], vg2[0]]
                        w1, w2 = vg1[1], vg2[1]
                        weight.weights = [w1/(w1+w2)]  # normalized weight of bone 1
                        if v.sdef_data:
                            weight.type = pmx.BoneWeight.SDEF
                            sdef_weights = pmx.BoneWeightSDEF()
                            sdef_weights.weight = weight.weights[0]
                            sdef_weights.c, sdef_weights.r0, sdef_weights.r1 = v.sdef_data
                            # SDEF bones must be in ascending order; flip weight too
                            if weight.bones[0] > weight.bones[1]:
                                weight.bones.reverse()
                                sdef_weights.weight = 1.0 - sdef_weights.weight
                            weight.weights = sdef_weights
                        pv.weight = weight
                    else:
                        weight = pmx.BoneWeight()
                        weight.type = pmx.BoneWeight.BDEF4
                        weight.bones = [0, 0, 0, 0]
                        weight.weights = [0.0, 0.0, 0.0, 0.0]
                        w_all = 0.0
                        if t > 4:
                            # keep only the four heaviest groups
                            v.groups.sort(key=lambda x: -x[1])
                        for i in range(min(t, 4)):
                            gn, w = v.groups[i]
                            weight.bones[i] = gn
                            weight.weights[i] = w
                            w_all += w
                        # NOTE(review): if all four weights are 0 this divides
                        # by zero — presumably upstream guarantees w_all > 0.
                        for i in range(4):
                            weight.weights[i] /= w_all
                        pv.weight = weight

                    self.__model.vertices.append(pv)
                    self.__exported_vertices.append(v)

                for face in mat_faces:
                    self.__model.faces.append([x.index for x in face.vertices])
                face_count += len(mat_faces)
            self.__exportMaterial(bpy.data.materials[mat_name], face_count)

        if sort_vertices:
            self.__sortVertices()
def __exportTexture(self, filepath):
if filepath.strip() == '':
return -1
# Use bpy.path to resolve '//' in .blend relative filepaths
filepath = bpy.path.abspath(filepath)
filepath = os.path.abspath(filepath)
for i, tex in enumerate(self.__model.textures):
if os.path.normcase(tex.path) == os.path.normcase(filepath):
return i
t = pmx.Texture()
t.path = filepath
self.__model.textures.append(t)
if not os.path.isfile(t.path):
logging.warning(' The texture file does not exist: %s', t.path)
return len(self.__model.textures) - 1
    def __copy_textures(self, output_dir, base_folder=''):
        """Copy every referenced texture next to the exported model.

        Relative layout under *base_folder* (or the preferred base texture
        folder from the addon preferences) is preserved; anything else falls
        back to an "<output_dir>/textures" folder. Name collisions get a
        numeric suffix. Each pmx.Texture.path is updated to its new location.
        """
        tex_dir_fallback = os.path.join(output_dir, 'textures')
        tex_dir_preference = bpyutils.addon_preferences('base_texture_folder', '')

        path_set = set()  # to prevent overwriting
        tex_copy_list = []
        for texture in self.__model.textures:
            path = texture.path
            tex_dir = output_dir  # restart to the default directory at each loop
            if not os.path.isfile(path):
                logging.warning('*** skipping texture file which does not exist: %s', path)
                path_set.add(os.path.normcase(path))
                continue
            dst_name = os.path.basename(path)
            if base_folder:
                dst_name = saferelpath(path, base_folder, strategy='outside')
                if dst_name.startswith('..'):
                    # Check if the texture comes from the preferred folder
                    if tex_dir_preference:
                        dst_name = saferelpath(path, tex_dir_preference, strategy='outside')
                    if dst_name.startswith('..'):
                        # If the code reaches here the texture is somewhere else
                        logging.warning('The texture %s is not inside the base texture folder', path)
                        # Fall back to basename and textures folder
                        dst_name = os.path.basename(path)
                        tex_dir = tex_dir_fallback
            else:
                tex_dir = tex_dir_fallback
            dest_path = os.path.join(tex_dir, dst_name)
            if os.path.normcase(path) != os.path.normcase(dest_path):  # Only copy if the paths are different
                tex_copy_list.append((texture, path, dest_path))
            else:
                path_set.add(os.path.normcase(path))

        for texture, path, dest_path in tex_copy_list:
            # resolve destination collisions with a numeric suffix
            counter = 1
            base, ext = os.path.splitext(dest_path)
            while os.path.normcase(dest_path) in path_set:
                dest_path = '%s_%d%s'%(base, counter, ext)
                counter += 1
            path_set.add(os.path.normcase(dest_path))
            os.makedirs(os.path.dirname(dest_path), exist_ok=True)
            shutil.copyfile(path, dest_path)
            logging.info('Copy file %s --> %s', path, dest_path)
            texture.path = dest_path
def __exportMaterial(self, material, num_faces):
    """Convert one Blender material into a pmx.Material and append it.

    Also records the Blender material name in the name table so later
    passes (sorting, material morphs) can map back to it.

    :param material: Blender material carrying an ``mmd_material`` group.
    :param num_faces: number of triangles assigned to this material.
    """
    mmd_mat = material.mmd_material
    pmx_mat = pmx.Material()

    # Names and colors come straight from the mmd_material property group.
    pmx_mat.name = mmd_mat.name_j or material.name
    pmx_mat.name_e = mmd_mat.name_e
    pmx_mat.diffuse = list(mmd_mat.diffuse_color) + [mmd_mat.alpha]
    pmx_mat.ambient = mmd_mat.ambient_color
    pmx_mat.specular = mmd_mat.specular_color
    pmx_mat.shininess = mmd_mat.shininess

    # Render flags.
    pmx_mat.is_double_sided = mmd_mat.is_double_sided
    pmx_mat.enabled_drop_shadow = mmd_mat.enabled_drop_shadow
    pmx_mat.enabled_self_shadow_map = mmd_mat.enabled_self_shadow_map
    pmx_mat.enabled_self_shadow = mmd_mat.enabled_self_shadow
    pmx_mat.enabled_toon_edge = mmd_mat.enabled_toon_edge
    pmx_mat.edge_color = mmd_mat.edge_color
    pmx_mat.edge_size = mmd_mat.edge_weight

    # Sphere mode is forced off when the user disabled specular on export.
    if self.__disable_specular:
        pmx_mat.sphere_texture_mode = pmx.Material.SPHERE_MODE_OFF
    else:
        pmx_mat.sphere_texture_mode = int(mmd_mat.sphere_texture_type)

    # Main and sphere textures — only image textures are exportable.
    fn_mat = FnMaterial(material)
    main_tex = fn_mat.get_texture()
    if main_tex and main_tex.type == 'IMAGE' and main_tex.image: # Ensure the texture is an image
        pmx_mat.texture = self.__exportTexture(main_tex.image.filepath)
    sphere_tex = fn_mat.get_sphere_texture()
    if sphere_tex and sphere_tex.type == 'IMAGE' and sphere_tex.image: # Ensure the texture is an image
        pmx_mat.sphere_texture = self.__exportTexture(sphere_tex.image.filepath)

    # Toon texture: either a shared (built-in) index or a file of its own.
    if mmd_mat.is_shared_toon_texture:
        pmx_mat.toon_texture = mmd_mat.shared_toon_texture
        pmx_mat.is_shared_toon_texture = True
    else:
        pmx_mat.toon_texture = self.__exportTexture(mmd_mat.toon_texture)
        pmx_mat.is_shared_toon_texture = False

    pmx_mat.comment = mmd_mat.comment
    pmx_mat.vertex_count = num_faces * 3
    self.__material_name_table.append(material.name)
    self.__model.materials.append(pmx_mat)
@classmethod
def __countBoneDepth(cls, bone):
    """Return the number of ancestors of *bone* (0 for a root bone)."""
    depth = 0
    while bone.parent is not None:
        bone = bone.parent
        depth += 1
    return depth
def __exportBones(self, meshes):
    """ Export bones.
    Returns:
        A dictionary to map Blender bone names to bone indices of the pmx.model instance.
    """
    arm = self.__armature
    # Blender 2.80+: evaluate the armature through the depsgraph so modifiers
    # and drivers are applied before reading pose data.
    if hasattr(arm, 'evaluated_get'):
        bpy.context.view_layer.update()
        arm = arm.evaluated_get(bpy.context.evaluated_depsgraph_get())
    boneMap = {}        # Blender Bone -> pmx.Bone
    pmx_bones = []
    pose_bones = arm.pose.bones
    world_mat = arm.matrix_world
    r = {}              # bone name -> pmx bone index (the return value)

    # determine the bone order
    # A mesh carrying an 'mmd_bone_order_override' modifier defines the order
    # via its vertex-group indices; bones without a group sort last (inf).
    vtx_grps = {}
    for mesh in meshes:
        if mesh.modifiers.get('mmd_bone_order_override', None):
            vtx_grps = mesh.vertex_groups
            break

    class _Dummy:
        index = float('inf')
    sorted_bones = sorted(pose_bones, key=lambda x: vtx_grps.get(x.name, _Dummy).index)
    #sorted_bones = sorted(pose_bones, key=self.__countBoneDepth)

    Vector = mathutils.Vector
    # Scale the world matrix and swap the Y/Z rows: Blender is Z-up, PMX is Y-up.
    pmx_matrix = world_mat * self.__scale
    pmx_matrix[1], pmx_matrix[2] = pmx_matrix[2].copy(), pmx_matrix[1].copy()
    def __to_pmx_location(loc):
        # Transform a point from armature space into PMX model space.
        return matmul(pmx_matrix, Vector(loc))

    pmx_matrix_rot = pmx_matrix.to_3x3()
    def __to_pmx_axis(axis, pose_bone):
        # Transform a bone-local axis into PMX space, including the bone's
        # current pose rotation relative to its rest matrix.
        m = matmul(pose_bone.matrix, pose_bone.bone.matrix_local.inverted()).to_3x3()
        return matmul(matmul(pmx_matrix_rot, m), Vector(axis).xzy).normalized()

    if True: # no need to enter edit mode
        for p_bone in sorted_bones:
            if p_bone.is_mmd_shadow_bone:
                continue
            bone = p_bone.bone
            mmd_bone = p_bone.mmd_bone
            pmx_bone = pmx.Bone()
            pmx_bone.name = mmd_bone.name_j or bone.name
            pmx_bone.name_e = mmd_bone.name_e
            pmx_bone.hasAdditionalRotate = mmd_bone.has_additional_rotation
            pmx_bone.hasAdditionalLocation = mmd_bone.has_additional_location
            # Stored as [bone name, influence]; the name is resolved to an
            # index in the fix-up pass below.
            pmx_bone.additionalTransform = [mmd_bone.additional_transform_bone, mmd_bone.additional_transform_influence]
            pmx_bone.location = __to_pmx_location(p_bone.head)
            pmx_bone.parent = bone.parent  # Bone object for now; index later
            # Visible only if not hidden and on at least one enabled layer.
            pmx_bone.visible = not bone.hide and any((all(x) for x in zip(bone.layers, arm.data.layers)))
            pmx_bone.isControllable = mmd_bone.is_controllable
            pmx_bone.isMovable = not all(p_bone.lock_location)
            pmx_bone.isRotatable = not all(p_bone.lock_rotation)
            pmx_bone.transform_order = mmd_bone.transform_order
            pmx_bone.transAfterPhis = mmd_bone.transform_after_dynamics
            pmx_bones.append(pmx_bone)
            self.__bone_name_table.append(p_bone.name)
            boneMap[bone] = pmx_bone
            r[bone.name] = len(pmx_bones) - 1

            # A bone connected to a "tip" parent shares the parent's location.
            if bone.use_connect and p_bone.parent.mmd_bone.is_tip:
                logging.debug(' * fix location of bone %s, parent %s is tip', bone.name, bone.parent.name)
                pmx_bone.location = boneMap[bone.parent].location

            # a connected child bone is preferred
            pmx_bone.displayConnection = None
            for child in bone.children:
                if child.use_connect:
                    pmx_bone.displayConnection = child
                    break
            if not pmx_bone.displayConnection:
                if mmd_bone.is_tip:
                    pmx_bone.displayConnection = -1
                else:
                    # No connected child: store the tail offset vector instead.
                    tail_loc = __to_pmx_location(p_bone.tail)
                    pmx_bone.displayConnection = tail_loc - pmx_bone.location

            if mmd_bone.enabled_fixed_axis:
                pmx_bone.axis = __to_pmx_axis(mmd_bone.fixed_axis, p_bone)

            if mmd_bone.enabled_local_axes:
                pmx_bone.localCoordinate = pmx.Coordinate(
                    __to_pmx_axis(mmd_bone.local_axis_x, p_bone),
                    __to_pmx_axis(mmd_bone.local_axis_z, p_bone))

        # Fix-up pass: resolve Bone/pmx.Bone references into list indices.
        for idx, i in enumerate(pmx_bones):
            if i.parent is not None:
                i.parent = pmx_bones.index(boneMap[i.parent])
                logging.debug('the parent of %s:%s: %s', idx, i.name, i.parent)
            if isinstance(i.displayConnection, pmx.Bone):
                i.displayConnection = pmx_bones.index(i.displayConnection)
            elif isinstance(i.displayConnection, bpy.types.Bone):
                i.displayConnection = pmx_bones.index(boneMap[i.displayConnection])
            # Resolve the additional-transform bone name; -1 if missing.
            i.additionalTransform[0] = r.get(i.additionalTransform[0], -1)

        if len(pmx_bones) == 0:
            # avoid crashing MMD
            pmx_bone = pmx.Bone()
            pmx_bone.name = u'全ての親'
            pmx_bone.name_e = 'Root'
            pmx_bone.location = __to_pmx_location([0,0,0])
            tail_loc = __to_pmx_location([0,0,1])
            pmx_bone.displayConnection = tail_loc - pmx_bone.location
            pmx_bones.append(pmx_bone)

        self.__model.bones = pmx_bones
    self.__exportIK(r)
    return r
def __exportIKLinks(self, pose_bone, count, bone_map, ik_links, custom_bone):
    """Recursively collect the pmx.IKLink chain starting at *pose_bone*.

    Walks up the parent chain at most *count* steps, building one IKLink per
    bone.  Angle limits come (in priority order) from an
    'mmd_ik_limit_custom<n>' constraint on *custom_bone*, then from a
    non-muted LIMIT_ROTATION override on the link bone, then from the bone's
    own Blender IK limits.

    :returns: ``ik_links`` extended with the links found.
    """
    if count <= 0 or pose_bone is None or pose_bone.name not in bone_map:
        return ik_links

    logging.debug(' Create IK Link for %s', pose_bone.name)
    ik_link = pmx.IKLink()
    ik_link.target = bone_map[pose_bone.name]

    from math import pi
    minimum, maximum = [-pi]*3, [pi]*3
    unused_counts = 0
    # Custom limit constraint is matched by position in the link chain.
    ik_limit_custom = next((c for c in custom_bone.constraints if c.type == 'LIMIT_ROTATION' and c.name == 'mmd_ik_limit_custom%d'%len(ik_links)), None)
    ik_limit_override = next((c for c in pose_bone.constraints if c.type == 'LIMIT_ROTATION' and not c.mute), None)
    for i, axis in enumerate('xyz'):
        if ik_limit_custom: # custom ik limits for MMD only
            if getattr(ik_limit_custom, 'use_limit_'+axis):
                minimum[i] = getattr(ik_limit_custom, 'min_'+axis)
                maximum[i] = getattr(ik_limit_custom, 'max_'+axis)
            else:
                unused_counts += 1
            continue

        if getattr(pose_bone, 'lock_ik_'+axis):
            # Locked axis: clamp to zero rotation.
            minimum[i] = maximum[i] = 0
        elif ik_limit_override is not None and getattr(ik_limit_override, 'use_limit_'+axis):
            minimum[i] = getattr(ik_limit_override, 'min_'+axis)
            maximum[i] = getattr(ik_limit_override, 'max_'+axis)
        elif getattr(pose_bone, 'use_ik_limit_'+axis):
            minimum[i] = getattr(pose_bone, 'ik_min_'+axis)
            maximum[i] = getattr(pose_bone, 'ik_max_'+axis)
        else:
            unused_counts += 1

    # Only write limits when at least one axis is actually constrained.
    if unused_counts < 3:
        convertIKLimitAngles = pmx.importer.PMXImporter.convertIKLimitAngles
        bone_matrix = matmul(pose_bone.id_data.matrix_world, pose_bone.matrix)
        minimum, maximum = convertIKLimitAngles(minimum, maximum, bone_matrix, invert=True)
        ik_link.minimumAngle = list(minimum)
        ik_link.maximumAngle = list(maximum)

    return self.__exportIKLinks(pose_bone.parent, count - 1, bone_map, ik_links + [ik_link], custom_bone)
def __exportIK(self, bone_map):
    """ Export IK constraints
    @param bone_map the dictionary to map Blender bone names to bone indices of the pmx.model instance.
    """
    pmx_bones = self.__model.bones
    arm = self.__armature
    # Custom property scaling MMD's iteration count; never below 1.
    ik_loop_factor = max(arm.get('mmd_ik_loop_factor', 1), 1)
    pose_bones = arm.pose.bones
    # Map an IK control bone name -> the pose bone that declares it as a
    # custom IK target (via an 'mmd_ik_target_custom' constraint subtarget).
    ik_target_custom_map = {getattr(b.constraints.get('mmd_ik_target_custom', None), 'subtarget', None):b for b in pose_bones if not b.is_mmd_shadow_bone}
    def __ik_target_bone_get(ik_constraint_bone, ik_bone):
        if ik_bone.name in ik_target_custom_map:
            logging.debug('  (use "mmd_ik_target_custom")')
            return ik_target_custom_map[ik_bone.name] # for supporting the ik target which is not a child of ik_constraint_bone
        return self.__get_ik_target_bone(ik_constraint_bone) # this only search the children of ik_constraint_bone
    for bone in pose_bones:
        if bone.is_mmd_shadow_bone:
            continue
        for c in bone.constraints:
            if c.type == 'IK' and not c.mute:
                logging.debug(' Found IK constraint on %s', bone.name)
                ik_pose_bone = self.__get_ik_control_bone(c)
                if ik_pose_bone is None:
                    logging.warning('  * Invalid IK constraint "%s" on bone %s', c.name, bone.name)
                    continue

                ik_bone_index = bone_map.get(ik_pose_bone.name, -1)
                if ik_bone_index < 0:
                    logging.warning('  * IK bone "%s" not found !!!', ik_pose_bone.name)
                    continue

                pmx_ik_bone = pmx_bones[ik_bone_index]
                if pmx_ik_bone.isIK:
                    # A PMX bone can carry only one IK setting.
                    logging.warning('  * IK bone "%s" is used by another IK setting !!!', ik_pose_bone.name)
                    continue

                # With use_tail the chain starts at this bone, else at its parent.
                ik_chain0 = bone if c.use_tail else bone.parent
                ik_target_bone = __ik_target_bone_get(bone, ik_pose_bone) if c.use_tail else bone
                if ik_target_bone is None:
                    logging.warning('  * IK bone: %s, IK Target not found !!!', ik_pose_bone.name)
                    continue
                logging.debug('  - IK bone: %s, IK Target: %s', ik_pose_bone.name, ik_target_bone.name)
                pmx_ik_bone.isIK = True
                pmx_ik_bone.loopCount = max(int(c.iterations/ik_loop_factor), 1)
                if ik_pose_bone.name in ik_target_custom_map:
                    pmx_ik_bone.rotationConstraint = ik_pose_bone.mmd_bone.ik_rotation_constraint
                else:
                    pmx_ik_bone.rotationConstraint = bone.mmd_bone.ik_rotation_constraint
                pmx_ik_bone.target = bone_map[ik_target_bone.name]
                pmx_ik_bone.ik_links = self.__exportIKLinks(ik_chain0, c.chain_count, bone_map, [], ik_pose_bone)
def __get_ik_control_bone(self, ik_constraint):
    """Resolve the pose bone that controls *ik_constraint*.

    Returns None when the constraint targets a different armature or its
    subtarget bone does not exist.  An 'IK_TARGET' shadow bone is resolved
    to its parent (the real control bone).
    """
    armature = ik_constraint.target
    if armature != ik_constraint.id_data:
        return None
    pose_bone = armature.pose.bones.get(ik_constraint.subtarget, None)
    if pose_bone is None:
        return None
    if pose_bone.mmd_shadow_bone_type != 'IK_TARGET':
        return pose_bone
    logging.debug(' Found IK proxy bone: %s -> %s', pose_bone.name, getattr(pose_bone.parent, 'name', None))
    return pose_bone.parent
def __get_ik_target_bone(self, target_bone):
    """ Get mmd ik target bone.

    Args:
        target_bone: A blender PoseBone

    Returns:
        A bpy.types.PoseBone object which is the closest bone from the tail position of target_bone.
        Return None if target_bone has no child bones.
    """
    candidates = [c for c in target_bone.children if not c.is_mmd_shadow_bone]

    # search 'mmd_ik_target_override' first
    for child in candidates:
        override = child.constraints.get('mmd_ik_target_override', None)
        if override and override.subtarget == target_bone.name:
            logging.debug(' (use "mmd_ik_target_override")')
            return child

    # A connected child wins outright; otherwise pick the child whose head
    # is nearest to the tail of target_bone.
    for child in candidates:
        if child.bone.use_connect:
            return child
    if not candidates:
        return None
    return min(candidates, key=lambda c: (c.head - target_bone.tail).length)
def __exportVertexMorphs(self, meshes, root):
    """Create pmx vertex morphs from the shape keys gathered per mesh.

    :param meshes: _Mesh results of __loadMeshData (each has shape_key_names).
    :param root: MMD root object, or None; supplies morph categories,
                 English names and display order when present.
    """
    # Deduplicated union of shape key names across all meshes, in first-seen order.
    shape_key_names = []
    for mesh in meshes:
        for i in mesh.shape_key_names:
            if i not in shape_key_names:
                shape_key_names.append(i)

    morph_categories = {}
    morph_english_names = {}
    if root:
        categories = self.CATEGORIES
        for vtx_morph in root.mmd_root.vertex_morphs:
            morph_english_names[vtx_morph.name] = vtx_morph.name_e
            morph_categories[vtx_morph.name] = categories.get(vtx_morph.category, pmx.Morph.CATEGORY_OHTER)
        # Keep the user-defined order from the vertex morph list.
        shape_key_names.sort(key=lambda x: root.mmd_root.vertex_morphs.find(x))

    for i in shape_key_names:
        morph = pmx.VertexMorph(
            name=i,
            name_e=morph_english_names.get(i, ''),
            category=morph_categories.get(i, pmx.Morph.CATEGORY_OHTER)
        )
        self.__model.morphs.append(morph)

    # shape key name -> offsets.append of its morph; relies on the morphs
    # list containing exactly the vertex morphs just created, in order.
    append_table = dict(zip(shape_key_names, [m.offsets.append for m in self.__model.morphs]))
    for v in self.__exported_vertices:
        for i, offset in v.offsets.items():
            mo = pmx.VertexMorphOffset()
            mo.index = v.index
            mo.offset = offset
            append_table[i](mo)
def __export_material_morphs(self, root):
    """Convert the root's material morph definitions to pmx.MaterialMorph.

    Material references are resolved against the Blender material name
    table built during material export; unknown names are skipped with a
    warning.  An empty material name means "all materials" (index -1).
    """
    mmd_root = root.mmd_root
    categories = self.CATEGORIES
    for morph in mmd_root.material_morphs:
        mat_morph = pmx.MaterialMorph(
            name=morph.name,
            name_e=morph.name_e,
            category=categories.get(morph.category, pmx.Morph.CATEGORY_OHTER)
        )
        for data in morph.data:
            morph_data = pmx.MaterialMorphOffset()
            try:
                if data.material != '':
                    morph_data.index = self.__material_name_table.index(data.material)
                else:
                    morph_data.index = -1
            except ValueError:
                logging.warning('Material Morph (%s): Material "%s" was not found.', morph.name, data.material)
                continue
            morph_data.offset_type = ['MULT', 'ADD'].index(data.offset_type)
            morph_data.diffuse_offset = data.diffuse_color
            morph_data.specular_offset = data.specular_color
            morph_data.shininess_offset = data.shininess
            morph_data.ambient_offset = data.ambient_color
            morph_data.edge_color_offset = data.edge_color
            morph_data.edge_size_offset = data.edge_weight
            morph_data.texture_factor = data.texture_factor
            morph_data.sphere_texture_factor = data.sphere_texture_factor
            morph_data.toon_texture_factor = data.toon_texture_factor
            mat_morph.offsets.append(morph_data)
        self.__model.morphs.append(mat_morph)
def __sortMaterials(self):
    """ Sort materials for alpha blending.

    Treats the average position of all model vertices as the model center,
    then computes, per material, the average distance between that center
    and the vertices of every face assigned to the material.  Materials are
    sorted by this value ascending, so materials used farther from the
    model center end up later in the list.  (Admittedly a rough heuristic.)
    """
    center = mathutils.Vector([0, 0, 0])
    vertices = self.__model.vertices
    vert_num = len(vertices)
    for v in self.__model.vertices:
        center += mathutils.Vector(v.co) / vert_num

    faces = self.__model.faces
    offset = 0
    distances = []
    # Faces are stored contiguously per material; walk them by counting
    # vertex_count/3 triangles for each material in turn.
    for mat, bl_mat_name in zip(self.__model.materials, self.__material_name_table):
        d = 0
        face_num = int(mat.vertex_count / 3)
        for i in range(offset, offset + face_num):
            face = faces[i]
            d += (mathutils.Vector(vertices[face[0]].co) - center).length
            d += (mathutils.Vector(vertices[face[1]].co) - center).length
            d += (mathutils.Vector(vertices[face[2]].co) - center).length
        distances.append((d/mat.vertex_count, mat, offset, face_num, bl_mat_name))
        offset += face_num

    # Rebuild faces, materials and the name table in sorted order.
    sorted_faces = []
    sorted_mat = []
    self.__material_name_table.clear()
    for d, mat, offset, vert_count, bl_mat_name in sorted(distances, key=lambda x: x[0]):
        sorted_faces.extend(faces[offset:offset+vert_count])
        sorted_mat.append(mat)
        self.__material_name_table.append(bl_mat_name)
    self.__model.materials = sorted_mat
    self.__model.faces = sorted_faces
def __export_bone_morphs(self, root):
    """Convert the root's bone morph definitions to pmx.BoneMorph.

    Offsets are converted through BoneConverter (or BoneConverterPoseMode
    when the model is built and not in rest position) with invert=True,
    i.e. from Blender space back to PMX bone space.
    """
    mmd_root = root.mmd_root
    if len(mmd_root.bone_morphs) == 0:
        return
    categories = self.CATEGORIES
    pose_bones = self.__armature.pose.bones
    use_pose_mode = mmd_root.is_built and self.__armature.data.pose_position != 'REST'
    bone_util_cls = BoneConverterPoseMode if use_pose_mode else BoneConverter
    for morph in mmd_root.bone_morphs:
        bone_morph = pmx.BoneMorph(
            name=morph.name,
            name_e=morph.name_e,
            category=categories.get(morph.category, pmx.Morph.CATEGORY_OHTER)
        )
        for data in morph.data:
            morph_data = pmx.BoneMorphOffset()
            try:
                morph_data.index = self.__bone_name_table.index(data.bone)
            except ValueError:
                # Bone was not exported (e.g. shadow bone) — drop the offset.
                continue
            blender_bone = pose_bones.get(data.bone, None)
            if blender_bone is None:
                logging.warning('Bone Morph (%s): Bone "%s" was not found.', morph.name, data.bone)
                continue
            converter = bone_util_cls(blender_bone, self.__scale, invert=True)
            morph_data.location_offset = converter.convert_location(data.location)
            # Stored rotation is (w, x, y, z); the converter takes/returns
            # (x, y, z, w), so reorder on both sides.
            rw, rx, ry, rz = data.rotation
            rw, rx, ry, rz = converter.convert_rotation([rx, ry, rz, rw])
            morph_data.rotation_offset = (rx, ry, rz, rw)
            bone_morph.offsets.append(morph_data)
        self.__model.morphs.append(bone_morph)
def __export_uv_morphs(self, root):
    """Convert the root's UV morph definitions to pmx.UVMorph.

    Only vertex-group based UV morphs carry data here; their per-vertex
    offsets were collected during mesh export.  Other (deprecated) data
    types produce an empty morph and a warning.
    """
    mmd_root = root.mmd_root
    if len(mmd_root.uv_morphs) == 0:
        return
    categories = self.CATEGORIES
    append_table_vg = {}  # morph name -> offsets.append of its pmx morph
    for morph in mmd_root.uv_morphs:
        uv_morph = pmx.UVMorph(
            name=morph.name,
            name_e=morph.name_e,
            category=categories.get(morph.category, pmx.Morph.CATEGORY_OHTER)
        )
        uv_morph.uv_index = morph.uv_index
        self.__model.morphs.append(uv_morph)
        if morph.data_type == 'VERTEX_GROUP':
            append_table_vg[morph.name] = uv_morph.offsets.append
            continue
        logging.warning(' * Deprecated UV morph "%s", please convert it to vertex groups', morph.name)

    if append_table_vg:
        incompleted = set()
        uv_morphs = mmd_root.uv_morphs
        for v in self.__exported_vertices:
            for name, offset in v.uv_offsets.items():
                if name not in append_table_vg:
                    incompleted.add(name)
                    continue
                scale = uv_morphs[name].vertex_group_scale
                morph_data = pmx.UVMorphOffset()
                morph_data.index = v.index
                # V and W components are negated — PMX UV space has V flipped
                # relative to Blender's.
                morph_data.offset = (offset[0]*scale, -offset[1]*scale, offset[2]*scale, -offset[3]*scale)
                append_table_vg[name](morph_data)
        if incompleted:
            logging.warning(' * Incompleted UV morphs %s with vertex groups', incompleted)
def __export_group_morphs(self, root):
    """Convert the root's group morph definitions to pmx.GroupMorph.

    Two passes: first append every group morph (so self-references between
    group morphs resolve), then fill in the member offsets using the full
    morph index map.
    """
    mmd_root = root.mmd_root
    if len(mmd_root.group_morphs) == 0:
        return
    categories = self.CATEGORIES
    start_index = len(self.__model.morphs)
    for morph in mmd_root.group_morphs:
        group_morph = pmx.GroupMorph(
            name=morph.name,
            name_e=morph.name_e,
            category=categories.get(morph.category, pmx.Morph.CATEGORY_OHTER)
        )
        self.__model.morphs.append(group_morph)

    morph_map = self.__get_pmx_morph_map()
    for morph, group_morph in zip(mmd_root.group_morphs, self.__model.morphs[start_index:]):
        for data in morph.data:
            morph_index = morph_map.get((data.morph_type, data.name), -1)
            if morph_index < 0:
                logging.warning('Group Morph (%s): Morph "%s" was not found.', morph.name, data.name)
                continue
            morph_data = pmx.GroupMorphOffset()
            morph_data.morph = morph_index
            morph_data.factor = data.factor
            group_morph.offsets.append(morph_data)
def __exportDisplayItems(self, root, bone_map):
    """Build the pmx display frames from the root's display item frames.

    Each item is encoded as (0, bone_index) or (1, morph_index); items that
    cannot be resolved are dropped with a warning.
    """
    res = []
    morph_map = self.__get_pmx_morph_map()
    for i in root.mmd_root.display_item_frames:
        d = pmx.Display()
        d.name = i.name
        d.name_e = i.name_e
        d.isSpecial = i.is_special
        items = []
        for j in i.data:
            if j.type == 'BONE' and j.name in bone_map:
                items.append((0, bone_map[j.name]))
            elif j.type == 'MORPH' and (j.morph_type, j.name) in morph_map:
                items.append((1, morph_map[(j.morph_type, j.name)]))
            else:
                logging.warning('Display item (%s, %s) was not found.', j.type, j.name)
        d.data = items
        res.append(d)
    self.__model.display = res
def __get_pmx_morph_map(self):
    """Map (morph_type_attr, morph_name) -> index in self.__model.morphs.

    The first tuple element is the mmd_root collection attribute name that
    corresponds to the pmx morph class (e.g. 'vertex_morphs').
    """
    type_to_attr = {
        pmx.GroupMorph : 'group_morphs',
        pmx.VertexMorph : 'vertex_morphs',
        pmx.BoneMorph : 'bone_morphs',
        pmx.UVMorph : 'uv_morphs',
        pmx.MaterialMorph : 'material_morphs',
    }
    return {(type_to_attr[type(m)], m.name): i
            for i, m in enumerate(self.__model.morphs)}
def __exportRigidBodies(self, rigid_bodies, bone_map):
    """Convert rigid body objects into pmx.Rigid entries.

    :param rigid_bodies: Blender objects carrying ``mmd_rigid`` settings.
    :param bone_map: bone name -> pmx bone index mapping.
    :returns: dict mapping each exported object to its pmx rigid index.
    :raises Exception: when an object has an unknown shape type.
    """
    rigid_map = {}
    rigid_cnt = 0
    Vector = mathutils.Vector
    for obj in rigid_bodies:
        t, r, s = obj.matrix_world.decompose()
        r = r.to_euler('YXZ')
        rb = obj.rigid_body
        if rb is None:
            logging.warning(' * Settings of rigid body "%s" not found, skipped!', obj.name)
            continue
        p_rigid = pmx.Rigid()
        mmd_rigid = obj.mmd_rigid
        p_rigid.name = mmd_rigid.name_j or MoveObject.get_name(obj)
        p_rigid.name_e = mmd_rigid.name_e
        # .xzy swizzle + rotation negation: Blender Z-up -> PMX Y-up.
        p_rigid.location = Vector(t).xzy * self.__scale
        p_rigid.rotation = Vector(r).xzy * -1
        p_rigid.mode = int(mmd_rigid.type)

        rigid_shape = mmd_rigid.shape
        # Bake the object's (uniform average) world scale into the size.
        shape_size = Vector(mmd_rigid.size) * (sum(s) / 3)
        if rigid_shape == 'SPHERE':
            p_rigid.type = 0
            p_rigid.size = shape_size * self.__scale
        elif rigid_shape == 'BOX':
            p_rigid.type = 1
            p_rigid.size = shape_size.xzy * self.__scale
        elif rigid_shape == 'CAPSULE':
            p_rigid.type = 2
            p_rigid.size = shape_size * self.__scale
        else:
            # BUGFIX: the message was previously passed logging-style as
            # Exception('... %s %s', obj.name, rigid_shape), which never
            # formats the placeholders; format it explicitly instead.
            raise Exception('Invalid rigid body type: %s %s' % (obj.name, rigid_shape))
        p_rigid.bone = bone_map.get(mmd_rigid.bone, -1)
        p_rigid.collision_group_number = mmd_rigid.collision_group_number

        # PMX stores a "no collision" bitmask: set the bit for every group
        # this body should NOT collide with.
        mask = 0
        for i, v in enumerate(mmd_rigid.collision_group_mask):
            if not v:
                mask += (1<<i)
        p_rigid.collision_group_mask = mask

        p_rigid.mass = rb.mass
        p_rigid.friction = rb.friction
        p_rigid.bounce = rb.restitution
        p_rigid.velocity_attenuation = rb.linear_damping
        p_rigid.rotation_attenuation = rb.angular_damping

        self.__model.rigids.append(p_rigid)
        rigid_map[obj] = rigid_cnt
        rigid_cnt += 1
    return rigid_map
def __exportJoints(self, joints, rigid_map):
    """Convert joint (rigid body constraint) objects to pmx.Joint entries.

    :param joints: Blender empties carrying ``mmd_joint`` settings and a
                   rigid_body_constraint.
    :param rigid_map: object -> pmx rigid index, from __exportRigidBodies.
    """
    Vector = mathutils.Vector
    for joint in joints:
        t, r, s = joint.matrix_world.decompose()
        r = r.to_euler('YXZ')
        rbc = joint.rigid_body_constraint
        if rbc is None:
            logging.warning(' * Settings of joint "%s" not found, skipped!', joint.name)
            continue
        p_joint = pmx.Joint()
        mmd_joint = joint.mmd_joint
        p_joint.name = mmd_joint.name_j or MoveObject.get_name(joint, 'J.')
        p_joint.name_e = mmd_joint.name_e
        # .xzy swizzle + rotation negation: Blender Z-up -> PMX Y-up.
        p_joint.location = Vector(t).xzy * self.__scale
        p_joint.rotation = Vector(r).xzy * -1
        p_joint.src_rigid = rigid_map.get(rbc.object1, -1)
        p_joint.dest_rigid = rigid_map.get(rbc.object2, -1)
        # Linear limits are scaled by the joint's (average) world scale;
        # angular limits swap upper/lower and negate due to the handedness
        # change of the coordinate conversion.
        scale = self.__scale * sum(s) / 3
        p_joint.maximum_location = Vector((rbc.limit_lin_x_upper, rbc.limit_lin_z_upper, rbc.limit_lin_y_upper)) * scale
        p_joint.minimum_location = Vector((rbc.limit_lin_x_lower, rbc.limit_lin_z_lower, rbc.limit_lin_y_lower)) * scale
        p_joint.maximum_rotation = Vector((rbc.limit_ang_x_lower, rbc.limit_ang_z_lower, rbc.limit_ang_y_lower)) * -1
        p_joint.minimum_rotation = Vector((rbc.limit_ang_x_upper, rbc.limit_ang_z_upper, rbc.limit_ang_y_upper)) * -1
        p_joint.spring_constant = Vector(mmd_joint.spring_linear).xzy
        p_joint.spring_rotation_constant = Vector(mmd_joint.spring_angular).xzy
        self.__model.joints.append(p_joint)
@staticmethod
def __convertFaceUVToVertexUV(vert_index, uv, normal, vertices_map):
    """Return a _Vertex for (vert_index, uv, normal), splitting if needed.

    Reuses an existing split vertex whose UV and normal are close enough;
    otherwise clones one and appends it to the split list for this index.
    """
    candidates = vertices_map[vert_index]
    for candidate in candidates:
        if candidate.uv is None:
            # First use of this vertex: claim it.
            candidate.uv = uv
            candidate.normal = normal
            return candidate
        if (candidate.uv - uv).length < 0.001 and (normal - candidate.normal).length < 0.01:
            return candidate
    # No match — rip a new vertex off the last candidate.
    ripped = copy.copy(candidates[-1]) # shallow copy should be fine
    ripped.uv = uv
    ripped.normal = normal
    candidates.append(ripped)
    return ripped
@staticmethod
def __convertAddUV(vert, adduv, addzw, uv_index, vertices, rip_vertices):
    """Assign an additional UV (xy + zw) to *vert*, splitting on conflict.

    If the slot is free it is filled in place; otherwise an existing rip
    with matching values is reused, or a new shallow copy is created and
    registered in both *vertices* and *rip_vertices*.
    """
    if vert.add_uvs[uv_index] is None:
        vert.add_uvs[uv_index] = (adduv, addzw)
        return vert
    for candidate in rip_vertices:
        uv_part, zw_part = candidate.add_uvs[uv_index]
        if (uv_part - adduv).length < 0.001 and (zw_part - addzw).length < 0.001:
            return candidate
    ripped = copy.copy(vert)
    # Copy the slot list so the original vertex keeps its own values.
    new_add_uvs = list(vert.add_uvs)
    new_add_uvs[uv_index] = (adduv, addzw)
    ripped.add_uvs = new_add_uvs
    vertices.append(ripped)
    rip_vertices.append(ripped)
    return ripped
@staticmethod
def __triangulate(mesh, custom_normals):
    """Triangulate *mesh* in place and remap per-loop custom normals.

    :param custom_normals: one normal per loop of the untriangulated mesh.
    :returns: (loop_normals, face_indices) where loop_normals has one entry
              per loop of the (possibly new) triangulated mesh, and
              face_indices maps each new triangle to its original polygon
              index (None when the mesh was already triangulated).
    """
    bm = bmesh.new()
    bm.from_mesh(mesh)

    # Record, per original face, the loop id of each of its vertices so the
    # normals can be looked up after triangulation rewrites the faces.
    is_triangulated = True
    face_verts_to_loop_id_map = {}
    loop_id = 0
    for f in bm.faces:
        vert_to_loop_id = face_verts_to_loop_id_map.setdefault(f, {})
        if is_triangulated and len(f.verts) != 3:
            is_triangulated = False
        for v in f.verts:
            vert_to_loop_id[v] = loop_id
            loop_id += 1

    loop_normals, face_indices = None, None
    if is_triangulated:
        # Nothing to do; normals already match the loops one-to-one.
        loop_normals = custom_normals
    else:
        # Quad/ngon methods: numeric ids pre-2.80, enum strings afterwards.
        quad_method, ngon_method = (1, 1) if bpy.app.version < (2, 80, 0) else ('FIXED', 'EAR_CLIP')
        face_map = bmesh.ops.triangulate(bm, faces=bm.faces, quad_method=quad_method, ngon_method=ngon_method)['face_map']
        logging.debug(' - Remapping custom normals...')
        loop_normals, face_indices = [], []
        for f in bm.faces:
            # face_map maps a new triangle to its source face; faces that
            # were already triangles are absent and map to themselves.
            f_orig = face_map.get(f, f)
            face_indices.append(f_orig.index)
            vert_to_loop_id = face_verts_to_loop_id_map[f_orig]
            for v in f.verts:
                loop_normals.append(custom_normals[vert_to_loop_id[v]])
        logging.debug(' - Done (faces:%d)', len(bm.faces))
        # Write the triangulated geometry back to the original mesh.
        bm.to_mesh(mesh)
        face_map.clear()

    face_verts_to_loop_id_map.clear()
    bm.free()
    assert(len(loop_normals) == len(mesh.loops))
    return loop_normals, face_indices
@staticmethod
def __get_normals(mesh, matrix):
    """Return one transformed, normalized normal per loop of *mesh*.

    :param matrix: 3x3 matrix applied to each normal (PMX conversion with
                   object scale removed).
    """
    custom_normals = None
    if hasattr(mesh, 'has_custom_normals'):
        # Newer Blender: split normals are always available after calc.
        logging.debug(' - Calculating normals split...')
        mesh.calc_normals_split()
        custom_normals = [matmul(matrix, l.normal).normalized() for l in mesh.loops]
        mesh.free_normals_split()
    elif mesh.use_auto_smooth:
        logging.debug(' - Calculating normals split (angle:%f)...', mesh.auto_smooth_angle)
        mesh.calc_normals_split(mesh.auto_smooth_angle)
        custom_normals = [matmul(matrix, l.normal).normalized() for l in mesh.loops]
        mesh.free_normals_split()
    else:
        logging.debug(' - Calculating normals...')
        mesh.calc_normals()
        #custom_normals = [matmul(matrix, mesh.vertices[l.vertex_index].normal).normalized() for l in mesh.loops]
        # Fallback: build loop normals by hand — smooth faces use the vertex
        # normal, flat faces use the face normal for every corner.
        custom_normals = []
        for f in mesh.polygons:
            if f.use_smooth:
                for v in f.vertices:
                    custom_normals.append(matmul(matrix, mesh.vertices[v].normal).normalized())
            else:
                for v in f.vertices:
                    custom_normals.append(matmul(matrix, f.normal).normalized())
    logging.debug(' - Done (polygons:%d)', len(mesh.polygons))
    return custom_normals
def __doLoadMeshData(self, meshObj, bone_map):
    """Extract vertices, faces, UVs, add-UVs and shape key offsets of a mesh.

    Evaluates the mesh with modifiers applied, triangulates it, converts it
    into PMX space, splits vertices per UV/normal, and samples every shape
    key (including the special SDEF keys) as per-vertex offsets.

    :param meshObj: the Blender mesh object to export.
    :param bone_map: bone name -> pmx bone index mapping.
    :returns: a _Mesh(material_faces, shape_key_names, material_names).
    """
    # vertex group index -> pmx bone index, for weight export.
    vg_to_bone = {i:bone_map[x.name] for i, x in enumerate(meshObj.vertex_groups) if x.name in bone_map}
    vg_edge_scale = meshObj.vertex_groups.get('mmd_edge_scale', None)
    vg_vertex_order = meshObj.vertex_groups.get('mmd_vertex_order', None)

    # Scale the world matrix and swap the Y/Z rows: Blender Z-up -> PMX Y-up.
    pmx_matrix = meshObj.matrix_world * self.__scale
    pmx_matrix[1], pmx_matrix[2] = pmx_matrix[2].copy(), pmx_matrix[1].copy()

    sx, sy, sz = meshObj.matrix_world.to_scale()
    normal_matrix = pmx_matrix.to_3x3()
    if not (sx == sy == sz):
        invert_scale_matrix = mathutils.Matrix([[1.0/sx,0,0], [0,1.0/sy,0], [0,0,1.0/sz]])
        normal_matrix = matmul(normal_matrix, invert_scale_matrix) # reset the scale of meshObj.matrix_world
        normal_matrix = matmul(normal_matrix, invert_scale_matrix) # the scale transform of normals

    # Evaluated-mesh helpers for both the 2.7x and 2.80+ APIs.
    if bpy.app.version < (2, 80, 0):
        _to_mesh = lambda obj: obj.to_mesh(bpy.context.scene, apply_modifiers=True, settings='PREVIEW', calc_tessface=False, calc_undeformed=False)
        _to_mesh_clear = lambda obj, mesh: bpy.data.meshes.remove(mesh)
    else:
        def _to_mesh(obj):
            bpy.context.view_layer.update()
            depsgraph = bpy.context.evaluated_depsgraph_get()
            return obj.evaluated_get(depsgraph).to_mesh(depsgraph=depsgraph, preserve_all_data_layers=True)
        _to_mesh_clear = lambda obj, mesh: obj.to_mesh_clear()

    base_mesh = _to_mesh(meshObj)
    # Normals must be captured before transform(); positions after.
    loop_normals, face_indices = self.__triangulate(base_mesh, self.__get_normals(base_mesh, normal_matrix))
    base_mesh.transform(pmx_matrix)

    def _get_weight(vertex_group_index, vertex, default_weight):
        # Weight of *vertex* in the given group, or *default_weight*.
        for i in vertex.groups:
            if i.group == vertex_group_index:
                return i.weight
        return default_weight

    get_edge_scale = None
    if vg_edge_scale:
        get_edge_scale = lambda x: _get_weight(vg_edge_scale.index, x, 1)
    else:
        get_edge_scale = lambda x: 1

    # Sort key used to order vertices across meshes when requested.
    get_vertex_order = None
    if self.__vertex_order_map: # sort vertices
        mesh_id = self.__vertex_order_map.setdefault('mesh_id', 0)
        self.__vertex_order_map['mesh_id'] += 1
        if vg_vertex_order and self.__vertex_order_map['method'] == 'CUSTOM':
            get_vertex_order = lambda x: (mesh_id, _get_weight(vg_vertex_order.index, x, 2), x.index)
        else:
            get_vertex_order = lambda x: (mesh_id, x.index)
    else:
        get_vertex_order = lambda x: None

    # vertex group index -> (uv morph name, signed axis like '+X').
    uv_morph_names = {g.index:(n, x) for g, n, x in FnMorph.get_uv_morph_vertex_groups(meshObj)}
    def get_uv_offsets(v):
        # Accumulate the vertex's UV-morph weights into XYZW offset lists.
        uv_offsets = {}
        for x in v.groups:
            if x.group in uv_morph_names and x.weight > 0:
                name, axis = uv_morph_names[x.group]
                d = uv_offsets.setdefault(name, [0, 0, 0, 0])
                d['XYZW'.index(axis[1])] += -x.weight if axis[0] == '-' else x.weight
        return uv_offsets

    # One split-vertex list per original vertex index; split on UV/normal later.
    base_vertices = {}
    for v in base_mesh.vertices:
        base_vertices[v.index] = [_Vertex(
            v.co.copy(),
            [(vg_to_bone[x.group], x.weight) for x in v.groups if x.weight > 0 and x.group in vg_to_bone],
            {},
            get_edge_scale(v),
            get_vertex_order(v),
            get_uv_offsets(v),
            )]

    # load face data
    class _DummyUV:
        # Stand-in when a mesh has no UV layer: all corners at (0, 1).
        uv1 = uv2 = uv3 = mathutils.Vector((0, 1))
        def __init__(self, uvs):
            self.uv1, self.uv2, self.uv3 = (v.uv.copy() for v in uvs)
    # Group flat per-loop UV data into per-triangle wrappers.
    _UVWrapper = lambda x: (_DummyUV(x[i:i+3]) for i in range(0, len(x), 3))

    material_faces = {}
    uv_data = base_mesh.uv_layers.active
    if uv_data:
        uv_data = _UVWrapper(uv_data.data)
    else:
        uv_data = iter(lambda: _DummyUV, None)  # endless _DummyUV supply
    face_seq = []
    for face, uv, face_index in zip(base_mesh.polygons, uv_data, face_indices or iter(lambda: -1, None)):
        if len(face.vertices) != 3:
            raise Exception
        idx = face.index * 3
        n1, n2, n3 = loop_normals[idx:idx+3]
        v1 = self.__convertFaceUVToVertexUV(face.vertices[0], uv.uv1, n1, base_vertices)
        v2 = self.__convertFaceUVToVertexUV(face.vertices[1], uv.uv2, n2, base_vertices)
        v3 = self.__convertFaceUVToVertexUV(face.vertices[2], uv.uv3, n3, base_vertices)
        t = _Face([v1, v2, v3], face_index)
        face_seq.append(t)
        if face.material_index not in material_faces:
            material_faces[face.material_index] = []
        material_faces[face.material_index].append(t)
    if face_indices:
        # Restore the original polygon order within each material bucket.
        for f in material_faces.values():
            f.sort(key=lambda x: x.index)

    _mat_name = lambda x: x.name if x else self.__getDefaultMaterial().name
    material_names = {i:_mat_name(m) for i, m in enumerate(base_mesh.materials)}
    material_names = {i:material_names.get(i, None) or _mat_name(None) for i in material_faces.keys()}

    # export add UV
    # Extra UV layers become PMX additional UVs; a '_<name>' layer holds zw.
    bl_add_uvs = [i for i in base_mesh.uv_layers[1:] if not i.name.startswith('_')]
    self.__add_uv_count = max(self.__add_uv_count, len(bl_add_uvs))
    for uv_n, uv_tex in enumerate(bl_add_uvs):
        if uv_n > 3:
            logging.warning(' * extra addUV%d+ are not supported', uv_n+1)
            break
        uv_data = _UVWrapper(uv_tex.data)
        zw_data = base_mesh.uv_layers.get('_'+uv_tex.name, None)
        logging.info(' # exporting addUV%d: %s [zw: %s]', uv_n+1, uv_tex.name, zw_data)
        if zw_data:
            zw_data = _UVWrapper(zw_data.data)
        else:
            zw_data = iter(lambda: _DummyUV, None)
        rip_vertices_map = {}
        for f, face, uv, zw in zip(face_seq, base_mesh.polygons, uv_data, zw_data):
            vertices = [base_vertices[x] for x in face.vertices]
            rip_vertices = [rip_vertices_map.setdefault(x, [x]) for x in f.vertices]
            f.vertices[0] = self.__convertAddUV(f.vertices[0], uv.uv1, zw.uv1, uv_n, vertices[0], rip_vertices[0])
            f.vertices[1] = self.__convertAddUV(f.vertices[1], uv.uv2, zw.uv2, uv_n, vertices[1], rip_vertices[1])
            f.vertices[2] = self.__convertAddUV(f.vertices[2], uv.uv3, zw.uv3, uv_n, vertices[2], rip_vertices[2])

    _to_mesh_clear(meshObj, base_mesh)

    # calculate offsets
    # Collect exportable shape keys; 'mmd_sdef_c' must be processed first
    # so the r0/r1 keys can attach to the SDEF data it creates.
    shape_key_list = []
    if meshObj.data.shape_keys:
        for i, kb in enumerate(meshObj.data.shape_keys.key_blocks):
            if i == 0: # Basis
                continue
            if kb.name.startswith('mmd_bind') or kb.name == FnSDEF.SHAPEKEY_NAME:
                continue
            if kb.name == 'mmd_sdef_c': # make sure 'mmd_sdef_c' is at first
                shape_key_list = [(i, kb)] + shape_key_list
            else:
                shape_key_list.append((i, kb))

    shape_key_names = []
    sdef_counts = 0
    for i, kb in shape_key_list:
        shape_key_name = kb.name
        logging.info(' - processing shape key: %s', shape_key_name)
        # Temporarily unmute and activate the key, evaluate, then restore.
        kb_mute, kb.mute = kb.mute, False
        meshObj.active_shape_key_index = i
        mesh = _to_mesh(meshObj)
        mesh.transform(pmx_matrix)
        kb.mute = kb_mute
        if len(mesh.vertices) != len(base_vertices):
            logging.warning('   * Error! vertex count mismatch!')
            continue
        if shape_key_name in {'mmd_sdef_c', 'mmd_sdef_r0', 'mmd_sdef_r1'}:
            if shape_key_name == 'mmd_sdef_c':
                for v in mesh.vertices:
                    base = base_vertices[v.index][0]
                    if len(base.groups) != 2:
                        continue
                    base_co = base.co
                    c_co = v.co
                    if (c_co - base_co).length < 0.001:
                        continue
                    # (C, R0, R1); R0/R1 start at the base position and are
                    # overwritten by the r0/r1 keys below.
                    base.sdef_data[:] = tuple(c_co), base_co, base_co
                    sdef_counts += 1
                logging.info('   - Restored %d SDEF vertices', sdef_counts)
            elif sdef_counts > 0:
                ri = 1 if shape_key_name == 'mmd_sdef_r0' else 2
                for v in mesh.vertices:
                    sdef_data = base_vertices[v.index][0].sdef_data
                    if sdef_data:
                        sdef_data[ri] = tuple(v.co)
                logging.info('   - Updated SDEF data')
        else:
            # Ordinary morph: store displacements above the noise threshold.
            shape_key_names.append(shape_key_name)
            for v in mesh.vertices:
                base = base_vertices[v.index][0]
                offset = v.co - base.co
                if offset.length < 0.001:
                    continue
                base.offsets[shape_key_name] = offset
        _to_mesh_clear(meshObj, mesh)

    if not pmx_matrix.is_negative: # pmx.load/pmx.save reverse face vertices by default
        for f in face_seq:
            f.vertices.reverse()

    return _Mesh(
        material_faces,
        shape_key_names,
        material_names)
def __loadMeshData(self, meshObj, bone_map):
    """Prepare *meshObj* for export, delegate to __doLoadMeshData, restore.

    Temporarily pins the object to the Basis shape key, activates the first
    UV layer and mutes armature modifiers whose armature is in rest pose,
    so the evaluated mesh is the undeformed base shape.  All state is
    restored in the finally block even if loading fails.
    """
    # Save and override shape-key display state.
    show_only_shape_key = meshObj.show_only_shape_key
    meshObj.show_only_shape_key = True
    active_shape_key_index = meshObj.active_shape_key_index
    meshObj.active_shape_key_index = 0
    # 'uv_textures' exists pre-2.80; fall back to uv_layers on newer Blender.
    uv_textures = getattr(meshObj.data, 'uv_textures', meshObj.data.uv_layers)
    active_uv_texture_index = uv_textures.active_index
    uv_textures.active_index = 0

    # Mute armature modifiers pointing at rest-pose armatures, remembering
    # their previous visibility.
    muted_modifiers = []
    for m in meshObj.modifiers:
        if m.type != 'ARMATURE' or m.object is None:
            continue
        if m.object.data.pose_position == 'REST':
            muted_modifiers.append((m, m.show_viewport))
            m.show_viewport = False

    try:
        logging.info('Loading mesh: %s', meshObj.name)
        return self.__doLoadMeshData(meshObj, bone_map)
    finally:
        # Restore everything we changed above.
        meshObj.show_only_shape_key = show_only_shape_key
        meshObj.active_shape_key_index = active_shape_key_index
        uv_textures.active_index = active_uv_texture_index
        for m, show in muted_modifiers:
            m.show_viewport = show
def execute(self, filepath, **args):
# Top-level export driver: builds a pmx.Model from the given Blender
# objects and writes it to *filepath*.  Recognized keyword args include:
# root, armature, meshes, rigid_bodies, joints, scale, disable_specular,
# sort_vertices, sort_materials, copy_textures.
root = args.get('root', None)
self.__model = pmx.Model()
# Placeholder metadata; overwritten below when a root object is given.
self.__model.name = 'test'
self.__model.name_e = 'test eng'
self.__model.comment = 'exported by mmd_tools'
self.__model.comment_e = 'exported by mmd_tools'
if root is not None:
self.__model.name = root.mmd_root.name or root.name
self.__model.name_e = root.mmd_root.name_e
# Comments are stored in Blender text blocks; PMX expects CRLF line ends.
txt = bpy.data.texts.get(root.mmd_root.comment_text, None)
if txt:
self.__model.comment = txt.as_string().replace('\n', '\r\n')
txt = bpy.data.texts.get(root.mmd_root.comment_e_text, None)
if txt:
self.__model.comment_e = txt.as_string().replace('\n', '\r\n')
self.__armature = args.get('armature', None)
# Sort inputs by name so the export order is deterministic.
meshes = sorted(args.get('meshes', []), key=lambda x: x.name)
rigids = sorted(args.get('rigid_bodies', []), key=lambda x: x.name)
joints = sorted(args.get('joints', []), key=lambda x: x.name)
self.__scale = args.get('scale', 1.0)
self.__disable_specular = args.get('disable_specular', False)
sort_vertices = args.get('sort_vertices', 'NONE')
if sort_vertices != 'NONE':
self.__vertex_order_map = {'method':sort_vertices}
# Bones must be exported first: meshes reference them via nameMap.
nameMap = self.__exportBones(meshes)
mesh_data = [self.__loadMeshData(i, nameMap) for i in meshes]
self.__exportMeshes(mesh_data, nameMap)
if args.get('sort_materials', False):
self.__sortMaterials()
self.__exportVertexMorphs(mesh_data, root)
# Morphs/display items require root-level MMD metadata.
if root is not None:
self.__export_bone_morphs(root)
self.__export_material_morphs(root)
self.__export_uv_morphs(root)
self.__export_group_morphs(root)
self.__exportDisplayItems(root, nameMap)
rigid_map = self.__exportRigidBodies(rigids, nameMap)
self.__exportJoints(joints, rigid_map)
if args.get('copy_textures', False):
output_dir = os.path.dirname(filepath)
# Prefer the model's original import folder, else the addon preference.
import_folder = root.get('import_folder', '') if root else ''
base_folder = bpyutils.addon_preferences('base_texture_folder', '')
self.__copy_textures(output_dir, import_folder or base_folder)
pmx.save(filepath, self.__model, add_uv_count=self.__add_uv_count)
def export(filepath, **kwargs):
    """Export a model to *filepath* via __PmxExporter, logging wall time."""
    banner = '****************************************'
    rule = '----------------------------------------'
    logging.info(banner)
    logging.info(' %s module'%__name__)
    logging.info(rule)
    begin = time.time()
    __PmxExporter().execute(filepath, **kwargs)
    logging.info(' Finished exporting the model in %f seconds.', time.time() - begin)
    logging.info(rule)
    logging.info(' %s module'%__name__)
    logging.info(banner)
| powroupi/blender_mmd_tools | mmd_tools/core/pmx/exporter.py | Python | gpl-3.0 | 56,830 | [
"VMD"
] | bbeac54b35be75e1a4ac01c78205e4d5ed882cfdf08d8b57e226e3144e583f8f |
########################################################################
# File : AgentModule.py
# Author : Adria Casajus
########################################################################
"""
Base class for all agent modules
"""
import os
import threading
import time
import signal
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger, rootPath
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Core.Utilities import Time, MemStat
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.FrameworkSystem.Client.MonitoringClient import MonitoringClient
__RCSID__ = "$Id$"
class AgentModule( object ):
""" Base class for all agent modules
This class is used by the AgentReactor class to steer the execution of
DIRAC Agents.
For this purpose the following methods are used:
- am_initialize() just after instantiation
- am_getPollingTime() to set the execution frequency
- am_getMaxCycles() to determine the number of cycles
- am_go() for the actual execution of one cycle
Before each iteration, the following methods are used to determine
if a new cycle is to be started.
- am_getModuleParam( 'alive' )
- am_checkStopAgentFile()
- am_removeStopAgentFile()
To start a new execution cycle the following methods are used
- am_getCyclesDone()
- am_setOption( 'MaxCycles', maxCycles )
At the same time it provides all Agents with a common interface.
All Agent classes must inherit from this base class and must implement
at least the following method:
- execute() main method called in the agent cycle
Additionally they may provide:
- initialize() for initial settings
- finalize() the graceful exit
- beginExecution() before each execution cycle
- endExecution() at the end of each execution cycle
The agent can be stopped either by a signal or by creating a 'stop_agent' file
in the controlDirectory defined in the agent configuration
"""
def __init__( self, agentName, loadName, baseAgentName = False, properties = None ):
  """
  Common __init__ method for all Agents.
  All Agent modules must define:
    __doc__
    __RCSID__
  They are used to populate __codeProperties
  The following Options are used from the Configuration:
    - /LocalSite/InstancePath
    - /DIRAC/Setup
    - Status
    - Enabled
    - PollingTime            default = 120
    - MaxCycles              default = 500
    - WatchdogTime           default = 0 (disabled)
    - ControlDirectory       control/SystemName/AgentName
    - WorkDirectory          work/SystemName/AgentName
    - shifterProxy           ''
    - shifterProxyLocation   WorkDirectory/SystemName/AgentName/.shifterCred
  It defines the following default Options that can be set via Configuration (above):
    - MonitoringEnabled     True
    - Enabled               True if Status == Active
    - PollingTime           120
    - MaxCycles             500
    - ControlDirectory      control/SystemName/AgentName
    - WorkDirectory         work/SystemName/AgentName
    - shifterProxy          False
    - shifterProxyLocation  work/SystemName/AgentName/.shifterCred
  different defaults can be set in the initialize() method of the Agent using am_setOption()
  In order to get a shifter proxy in the environment during the execute()
  the configuration Option 'shifterProxy' must be set, a default may be given
  in the initialize() method.

  NOTE: *properties* defaults to None instead of a shared mutable dict
  (mutable-default pitfall); passing None behaves exactly like passing {}.
  """
  if baseAgentName and agentName == baseAgentName:
    self.log = gLogger
    standaloneModule = True
  else:
    self.log = gLogger.getSubLogger( agentName, child = False )
    standaloneModule = False
  self.__basePath = gConfig.getValue( '/LocalSite/InstancePath', rootPath )
  self.__agentModule = None
  self.__codeProperties = {}
  self.__getCodeInfo()
  self.__moduleProperties = { 'fullName' : agentName,
                              'loadName' : loadName,
                              'section' : PathFinder.getAgentSection( agentName ),
                              'loadSection' : PathFinder.getAgentSection( loadName ),
                              'standalone' : standaloneModule,
                              'cyclesDone' : 0,
                              'totalElapsedTime' : 0,
                              'setup' : gConfig.getValue( "/DIRAC/Setup", "Unknown" ),
                              'alive' : True }
  self.__moduleProperties[ 'system' ], self.__moduleProperties[ 'agentName' ] = agentName.split( "/" )
  self.__configDefaults = {}
  self.__configDefaults[ 'MonitoringEnabled'] = True
  # BUGFIX: the original used "in ( 'active' )" -- parentheses do NOT make a
  # tuple, so this was a *substring* test against the string 'active'
  # (e.g. a Status of 'act' or 'tive' would have counted as enabled).
  # A one-element tuple gives a real membership (equality) test.
  self.__configDefaults[ 'Enabled'] = self.am_getOption( "Status", "Active" ).lower() in ( 'active', )
  self.__configDefaults[ 'PollingTime'] = self.am_getOption( "PollingTime", 120 )
  self.__configDefaults[ 'MaxCycles'] = self.am_getOption( "MaxCycles", 500 )
  self.__configDefaults[ 'WatchdogTime' ] = self.am_getOption( "WatchdogTime", 0 )
  self.__configDefaults[ 'ControlDirectory' ] = os.path.join( self.__basePath,
                                                              'control',
                                                              *agentName.split( "/" ) )
  self.__configDefaults[ 'WorkDirectory' ] = os.path.join( self.__basePath,
                                                           'work',
                                                           *agentName.split( "/" ) )
  self.__configDefaults[ 'shifterProxy' ] = ''
  self.__configDefaults[ 'shifterProxyLocation' ] = os.path.join( self.__configDefaults[ 'WorkDirectory' ],
                                                                  '.shifterCred' )
  # Merge caller-supplied module properties (None behaves like {}).
  if isinstance( properties, dict ):
    for key in properties:
      self.__moduleProperties[ key ] = properties[ key ]
  self.__moduleProperties[ 'executors' ] = [ ( self.execute, () ) ]
  self.__moduleProperties[ 'shifterProxy' ] = False
  self.__monitorLastStatsUpdate = -1
  self.monitor = None
  self.__initializeMonitor()
  self.__initialized = False
def __getCodeInfo( self ):
# Populate self.__codeProperties with version/description taken from the
# concrete agent's module (__RCSID__ and __doc__), plus DIRAC version info.
versionVar = "__RCSID__"
docVar = "__doc__"
try:
# Re-import the module the concrete class was defined in so its
# module-level attributes can be inspected.
self.__agentModule = __import__( self.__class__.__module__,
globals(),
locals(),
versionVar )
except Exception as excp:
self.log.exception( "Cannot load agent module", lException = excp )
for prop in ( ( versionVar, "version" ), ( docVar, "description" ) ):
try:
self.__codeProperties[ prop[1] ] = getattr( self.__agentModule, prop[0] )
except Exception:
# Missing attribute (or failed import above): record a placeholder.
self.log.error( "Missing property", prop[0] )
self.__codeProperties[ prop[1] ] = 'unset'
self.__codeProperties[ 'DIRACVersion' ] = DIRAC.version
self.__codeProperties[ 'platform' ] = DIRAC.getPlatform()
def am_initialize( self, *initArgs ):
# Run the subclass initialize() hook, create control/work directories,
# enable monitoring and log a startup banner.  Returns S_OK on success,
# S_ERROR if initialize() misbehaves or the agent is disabled.
agentName = self.am_getModuleParam( 'fullName' )
result = self.initialize( *initArgs )
if not isReturnStructure( result ):
return S_ERROR( "initialize must return S_OK/S_ERROR" )
if not result[ 'OK' ]:
return S_ERROR( "Error while initializing %s: %s" % ( agentName, result[ 'Message' ] ) )
mkDir( self.am_getControlDirectory() )
workDirectory = self.am_getWorkDirectory()
mkDir( workDirectory )
# Set the work directory in an environment variable available to subprocesses if needed
os.environ['AGENT_WORKDIRECTORY'] = workDirectory
self.__moduleProperties[ 'shifterProxy' ] = self.am_getOption( 'shifterProxy' )
if self.am_monitoringEnabled():
self.monitor.enable()
if len( self.__moduleProperties[ 'executors' ] ) < 1:
return S_ERROR( "At least one executor method has to be defined" )
if not self.am_Enabled():
return S_ERROR( "Agent is disabled via the configuration" )
# Startup banner with code/configuration summary.
self.log.notice( "="*40 )
self.log.notice( "Loaded agent module %s" % self.__moduleProperties[ 'fullName' ] )
self.log.notice( " Site: %s" % DIRAC.siteName() )
self.log.notice( " Setup: %s" % gConfig.getValue( "/DIRAC/Setup" ) )
self.log.notice( " Base Module version: %s " % __RCSID__ )
self.log.notice( " Agent version: %s" % self.__codeProperties[ 'version' ] )
self.log.notice( " DIRAC version: %s" % DIRAC.version )
self.log.notice( " DIRAC platform: %s" % DIRAC.getPlatform() )
pollingTime = int( self.am_getOption( 'PollingTime' ) )
# Report long polling intervals in hours for readability.
if pollingTime > 3600:
self.log.notice( " Polling time: %s hours" % ( pollingTime / 3600. ) )
else:
self.log.notice( " Polling time: %s seconds" % self.am_getOption( 'PollingTime' ) )
self.log.notice( " Control dir: %s" % self.am_getControlDirectory() )
self.log.notice( " Work dir: %s" % self.am_getWorkDirectory() )
if self.am_getOption( 'MaxCycles' ) > 0:
self.log.notice( " Cycles: %s" % self.am_getMaxCycles() )
else:
self.log.notice( " Cycles: unlimited" )
if self.am_getWatchdogTime() > 0:
self.log.notice( " Watchdog interval: %s" % self.am_getWatchdogTime() )
else:
self.log.notice( " Watchdog interval: disabled " )
self.log.notice( "="*40 )
self.__initialized = True
return S_OK()
def am_getControlDirectory( self ):
# Absolute path of the agent's control directory.
return os.path.join( self.__basePath, str( self.am_getOption( 'ControlDirectory' ) ) )
def am_getStopAgentFile( self ):
# Path of the 'stop_agent' marker file inside the control directory.
return os.path.join( self.am_getControlDirectory(), 'stop_agent' )
def am_checkStopAgentFile( self ):
# True when a stop has been requested via the marker file.
return os.path.isfile( self.am_getStopAgentFile() )
def am_createStopAgentFile( self ):
# Best-effort creation of the stop marker; failures are ignored.
try:
with open( self.am_getStopAgentFile(), 'w' ) as fd:
fd.write( 'Dirac site agent Stopped at %s' % Time.toString() )
except Exception:
pass
def am_removeStopAgentFile( self ):
# Best-effort removal of the stop marker; failures are ignored.
try:
os.unlink( self.am_getStopAgentFile() )
except Exception:
pass
def am_getBasePath( self ):
return self.__basePath
def am_getWorkDirectory( self ):
# Absolute path of the agent's work directory.
return os.path.join( self.__basePath, str( self.am_getOption( 'WorkDirectory' ) ) )
def am_getShifterProxyLocation( self ):
return os.path.join( self.__basePath, str( self.am_getOption( 'shifterProxyLocation' ) ) )
def am_getOption( self, optionName, defaultValue = None ):
# Resolve an option: explicit default > per-agent config defaults;
# absolute paths (starting with '/') go straight to gConfig; otherwise
# the agent's own section, then the load section, are consulted.
if defaultValue is None:
if optionName in self.__configDefaults:
defaultValue = self.__configDefaults[ optionName ]
if optionName and optionName[0] == "/":
return gConfig.getValue( optionName, defaultValue )
for section in ( self.__moduleProperties[ 'section' ], self.__moduleProperties[ 'loadSection' ] ):
result = gConfig.getOption( "%s/%s" % ( section, optionName ), defaultValue )
if result[ 'OK' ]:
return result[ 'Value' ]
return defaultValue
def am_setOption( self, optionName, value ):
# Override an option default (takes effect for later am_getOption calls).
self.__configDefaults[ optionName ] = value
def am_getModuleParam( self, optionName ):
return self.__moduleProperties[ optionName ]
def am_setModuleParam( self, optionName, value ):
self.__moduleProperties[ optionName ] = value
def am_getPollingTime( self ):
return self.am_getOption( "PollingTime" )
def am_getMaxCycles( self ):
return self.am_getOption( "MaxCycles" )
def am_getWatchdogTime( self ):
return int( self.am_getOption( "WatchdogTime" ) )
def am_getCyclesDone( self ):
return self.am_getModuleParam( 'cyclesDone' )
def am_Enabled( self ):
return self.am_getOption( "Enabled" )
def am_disableMonitoring( self ):
self.am_setOption( 'MonitoringEnabled' , False )
def am_monitoringEnabled( self ):
return self.am_getOption( "MonitoringEnabled" )
def am_stopExecution( self ):
# Mark the agent as not alive; the reactor stops scheduling cycles.
self.am_setModuleParam( 'alive', False )
def __initializeMonitor( self ):
"""
Initialize the system monitor client
"""
# Standalone agents share the global gMonitor; embedded ones get a
# private MonitoringClient instance.
if self.__moduleProperties[ 'standalone' ]:
self.monitor = gMonitor
else:
self.monitor = MonitoringClient()
self.monitor.setComponentType( self.monitor.COMPONENT_AGENT )
self.monitor.setComponentName( self.__moduleProperties[ 'fullName' ] )
self.monitor.initialize()
self.monitor.registerActivity( 'CPU', "CPU Usage", 'Framework', "CPU,%", self.monitor.OP_MEAN, 600 )
self.monitor.registerActivity( 'MEM', "Memory Usage", 'Framework', 'Memory,MB', self.monitor.OP_MEAN, 600 )
# Component monitor
for field in ( 'version', 'DIRACVersion', 'description', 'platform' ):
self.monitor.setComponentExtraParam( field, self.__codeProperties[ field ] )
self.monitor.setComponentExtraParam( 'startTime', Time.dateTime() )
self.monitor.setComponentExtraParam( 'cycles', 0 )
# Disabled until am_initialize() decides monitoring is wanted.
self.monitor.disable()
self.__monitorLastStatsUpdate = time.time()
def am_secureCall( self, functor, args = (), name = False ):
  """
  Call *functor( *args )* and enforce the S_OK/S_ERROR return convention.
  Any exception (including a non-structured return) is logged and turned
  into an S_ERROR, so the agent cycle never dies on a hook failure.
  """
  callName = name or str( functor )
  try:
    retVal = functor( *args )
    if not isReturnStructure( retVal ):
      # Raised here so the generic handler below logs and wraps it too.
      raise Exception( "%s method for %s module has to return S_OK/S_ERROR" % ( callName, self.__moduleProperties[ 'fullName' ] ) )
    return retVal
  except Exception as e:
    self.log.exception( "Agent exception while calling method %s" % callName, lException = e )
    return S_ERROR( "Exception while calling %s method: %s" % ( callName, str( e ) ) )
def _setShifterProxy( self ):
# If a shifter proxy is configured, install it in the environment before
# the cycle runs; returns S_OK, or the S_ERROR from the proxy setup.
if self.__moduleProperties[ "shifterProxy" ]:
result = setupShifterProxyInEnv( self.__moduleProperties[ "shifterProxy" ],
self.am_getShifterProxyLocation() )
if not result[ 'OK' ]:
self.log.error( "Failed to set shifter proxy", result['Message'] )
return result
return S_OK()
def am_go( self ):
# Run one full agent cycle: shifter proxy, optional SIGALRM watchdog,
# monitoring bookkeeping, the module cycle itself, and a run summary.
# Set the shifter proxy if required
result = self._setShifterProxy()
if not result[ 'OK' ]:
return result
self.log.notice( "-"*40 )
self.log.notice( "Starting cycle for module %s" % self.__moduleProperties[ 'fullName' ] )
mD = self.am_getMaxCycles()
if mD > 0:
cD = self.__moduleProperties[ 'cyclesDone' ]
self.log.notice( "Remaining %s of %s cycles" % ( mD - cD, mD ) )
self.log.notice( "-"*40 )
# use SIGALARM as a watchdog interrupt if enabled
watchdogInt = self.am_getWatchdogTime()
if watchdogInt > 0:
# SIG_DFL for SIGALRM terminates the process if the cycle hangs.
signal.signal( signal.SIGALRM, signal.SIG_DFL )
signal.alarm( watchdogInt )
elapsedTime = time.time()
cpuStats = self._startReportToMonitoring()
cycleResult = self.__executeModuleCycle()
if cpuStats:
self._endReportToMonitoring( *cpuStats )
# Increment counters
self.__moduleProperties[ 'cyclesDone' ] += 1
# Show status
elapsedTime = time.time() - elapsedTime
self.__moduleProperties[ 'totalElapsedTime' ] += elapsedTime
self.log.notice( "-"*40 )
self.log.notice( "Agent module %s run summary" % self.__moduleProperties[ 'fullName' ] )
self.log.notice( " Executed %s times previously" % self.__moduleProperties[ 'cyclesDone' ] )
self.log.notice( " Cycle took %.2f seconds" % elapsedTime )
averageElapsedTime = self.__moduleProperties[ 'totalElapsedTime' ] / self.__moduleProperties[ 'cyclesDone' ]
self.log.notice( " Average execution time: %.2f seconds" % ( averageElapsedTime ) )
elapsedPollingRate = averageElapsedTime * 100 / self.am_getOption( 'PollingTime' )
self.log.notice( " Polling time: %s seconds" % self.am_getOption( 'PollingTime' ) )
self.log.notice( " Average execution/polling time: %.2f%%" % elapsedPollingRate )
if cycleResult[ 'OK' ]:
self.log.notice( " Cycle was successful" )
else:
self.log.warn( " Cycle had an error:", cycleResult[ 'Message' ] )
self.log.notice( "-"*40 )
# Update number of cycles
self.monitor.setComponentExtraParam( 'cycles', self.__moduleProperties[ 'cyclesDone' ] )
# cycle finished successfully, cancel watchdog
if watchdogInt > 0:
signal.alarm(0)
return cycleResult
def _startReportToMonitoring( self ):
# Open a monitoring interval: returns (wallClock, cpuTime) for the
# matching _endReportToMonitoring call, or False on any failure.
# At most every 10 seconds it also pushes a memory-usage mark.
try:
now = time.time()
stats = os.times()
# user + system CPU time of this process.
cpuTime = stats[0] + stats[2]
if now - self.__monitorLastStatsUpdate < 10:
return ( now, cpuTime )
# Send CPU consumption mark
self.__monitorLastStatsUpdate = now
# Send Memory consumption mark
membytes = MemStat.VmB( 'VmRSS:' )
if membytes:
mem = membytes / ( 1024. * 1024. )
gMonitor.addMark( 'MEM', mem )
return( now, cpuTime )
except Exception:
# NOTE(review): deliberately best-effort -- monitoring problems must
# not break the agent cycle, so all errors are swallowed here.
return False
def _endReportToMonitoring( self, initialWallTime, initialCPUTime ):
  """
  Close a monitoring interval opened by _startReportToMonitoring and,
  when the CPU percentage over the interval is positive, push it to the
  'CPU' activity mark.
  """
  elapsed = time.time() - initialWallTime
  procTimes = os.times()
  usedCPU = procTimes[0] + procTimes[2] - initialCPUTime
  percentage = ( usedCPU / elapsed * 100. ) if elapsed else 0
  if percentage > 0:
    gMonitor.addMark( 'CPU', percentage )
def __executeModuleCycle( self ):
# One module cycle: beginExecution hook, then the executor(s)
# (sequentially if there is one, in daemon threads if several),
# then the endExecution hook.  Every call goes through am_secureCall.
# Execute the beginExecution function
result = self.am_secureCall( self.beginExecution, name = "beginExecution" )
if not result[ 'OK' ]:
return result
# Launch executor functions
executors = self.__moduleProperties[ 'executors' ]
if len( executors ) == 1:
result = self.am_secureCall( executors[0][0], executors[0][1] )
if not result[ 'OK' ]:
return result
else:
# NOTE: with multiple executors their individual results are not
# collected; only the threads are joined.
exeThreads = [ threading.Thread( target = executor[0], args = executor[1] ) for executor in executors ]
for thread in exeThreads:
thread.setDaemon( 1 )
thread.start()
for thread in exeThreads:
thread.join()
# Execute the endExecution function
return self.am_secureCall( self.endExecution, name = "endExecution" )
def initialize( self, *args, **kwargs ):
# Hook: one-time setup, called by am_initialize(); override in subclasses.
return S_OK()
def beginExecution( self ):
# Hook: called before each execution cycle.
return S_OK()
def endExecution( self ):
# Hook: called after each execution cycle.
return S_OK()
def finalize( self ):
# Hook: graceful shutdown.
return S_OK()
def execute( self ):
# Main per-cycle method; concrete agents MUST override this.
return S_ERROR( "Execute method has to be overwritten by agent module" )
| arrabito/DIRAC | Core/Base/AgentModule.py | Python | gpl-3.0 | 18,318 | [
"DIRAC"
] | 379a945f65e5a28bb3a88f9f3fa2fdbf6f09f193873d4a06c16ebe965f547971 |
from pathlib import Path
import shutil
import tempfile
import pytest
from pysisyphus.benchmarks import Benchmark
from pysisyphus.helpers import align_geoms
from pysisyphus.run import run_from_dict
from pysisyphus.testing import using
from pysisyphus.xyzloader import write_geoms_to_trj
# Birkholz reaction benchmark set; cases 3, 16 and 18 are excluded
# (two Cope rearrangements GSM cannot handle, one without a GFN2-xTB TS).
Bh = Benchmark(
"birkholz_rx",
# exclude=list(range(14)),
# 03 cope; GSM can't handle this
# 16 cope; GSM can't handle this
# 18 has no TS at the GFN2-XTB level of theory
exclude=(3, 16, 18),
)
@pytest.mark.benchmark
@using("xtb")
@pytest.mark.parametrize("fn, geoms, charge, mult, ref_energy", Bh.geom_iter)
def test_birkholz_rx_gsm(fn, geoms, charge, mult, ref_energy, results_bag):
# For each benchmark reaction: (1) re-optimize the reference TS at the
# GFN2-xTB level, (2) run a growing-string calculation between reactant
# and product followed by a TS optimization, (3) compare the two TSs by
# RMSD, energy and leading imaginary frequency.
start, ts_ref_org, end = geoms
id_ = fn[:2]
# --- Step 1: reference TS optimization from the published geometry ---
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
inp_ts = str(tmp_path / "ts_input.trj")
with open(inp_ts, "w") as handle:
handle.write(ts_ref_org.as_xyz())
ts_run_dict = {
"geom": {
"type": "redund",
"fn": inp_ts,
},
"calc": {
"type": "xtb",
"pal": 6,
"mem": 750,
"charge": charge,
"mult": mult,
"quiet": True,
},
"tsopt": {
"type": "rsirfo",
"hessian_recalc": 1,
"trust_max": 0.3,
"thresh": "gau",
"do_hess": True,
},
}
ts_results = run_from_dict(ts_run_dict)
# Reference values
ts_ref = ts_results.ts_geom
ts_ref_energy = ts_ref.energy
ts_ref_imag = ts_ref.get_imag_frequencies()[0]
# --- Step 2: growing string + TS optimization from the endpoints ---
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
inp_trj = str(tmp_path / "gs_inputs.trj")
write_geoms_to_trj((start, end), inp_trj)
run_dict = {
"geom": {
# Case 02 needs Cartesian coordinates; DLC otherwise.
"type": "cart" if id_ == "02" else "dlc",
"fn": inp_trj,
},
"calc": {
"type": "xtb",
"pal": 6,
"mem": 750,
"charge": charge,
"mult": mult,
},
"preopt": {
"max_cycles": 5,
},
"cos": {
"type": "gs",
# "fix_ends": True,
# "max_nodes": 11,
# "reparam_check": "rms",
# "perp_thresh": 0.075,
"climb": True,
"climb_rms": 0.01,
# "climb_lanczos": True,
# "climb_lanczos_rms": 0.0075,
# "reset_dlc": True,
},
"opt": {
"type": "string",
"max_step": 0.2,
# "lbfgs_when_full": True,
# "max_step": 0.25,
# "keep_last": 10,
"rms_force": 0.005,
"rms_force_only": True,
# "double_damp": True,
},
"tsopt": {
"type": "rsirfo",
"do_hess": True,
"thresh": "gau",
"trust_max": 0.3,
"max_cycles": 100,
},
}
results = run_from_dict(run_dict)
ts_geom = results.ts_geom
ts_energy = ts_geom.energy
ts_imag = ts_geom.get_imag_frequencies()[0]
# --- Step 3: compare reference TS vs. COS-derived TS ---
rmsd = ts_ref.rmsd(ts_geom)
diff = ts_ref_energy - ts_energy
cmt = "Ref" if diff < 0.0 else " TS"
rmsd_fmt = " >12.4f"
print(f"RMSD: {rmsd:{rmsd_fmt}}")
print(f" TS energy: {ts_energy:.6f}")
print(f"Ref energy: {ts_ref_energy:.6f}")
print(f" Diff: {diff:.6f}")
print(
f"@@@{id_} COMPARE@@@: rmsd={rmsd:{rmsd_fmt}}, ΔE= {diff: .6f} {cmt} is lower, "
f"Ref: {ts_ref_imag: >8.1f}, TS: {ts_imag: >8.1f} cm⁻¹"
)
assert results.ts_opt.is_converged
shutil.copy("ts_opt.xyz", f"{id_}_ts_opt.xyz")
# Dump TS geoms
ts_ref_org.comment = "TS ref org"
ts_ref.comment = "TS ref opt"
ts_geom.comment = "TS opt from cos"
ts_geoms = (ts_ref_org, ts_ref, ts_geom)
align_geoms(ts_geoms)
ts_fns = f"{id_}_ts_geoms.trj"
write_geoms_to_trj(ts_geoms, ts_fns)
| eljost/pysisyphus | tests/test_birkholz_rx/test_birkholz_rx.py | Python | gpl-3.0 | 4,385 | [
"xTB"
] | 333dc76cb2e8f53bebd60e955878849b7f328d8bf70e66b2b9b0373ea403ac6e |
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
# Collect the binary-position RRBS files for each B-cell population.
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
# Keep only chromosome-7 positions: the 'position' strings start "chr7_".
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr7_"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
# Outer-join all per-sample frames on 'position' into one wide matrix.
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
# Report the dimensions of the assembled methylation matrix
# (rows: cells/samples, columns: CpG sites).
print(total_matrix.shape)
# Encode each methylation call as an int; missing values become "?", which
# downstream phylogeny tools treat as an unknown character state.
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
# Collapse every row into one contiguous character string per cell.
total_matrix = total_matrix.astype(str).apply(''.join)
# Prefix each sequence with its cell label, space-separated — a PHYLIP-style
# row ("name sequence"); presumably consumed by a tree-building tool (the
# output extension is .phy) — TODO confirm against the downstream pipeline.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("normal_chrom7.phy", header=None, index=None)
print(tott.shape)
| evanbiederstedt/RRBSfun | trees/chrom_scripts/normal_chr07.py | Python | mit | 25,843 | [
"MCell"
] | b50595ea7b7920869f80378675c8fa6a2c711b35ea761c2949b1707d0810e8d1 |
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Implement Box."""
import numpy as np
from functools import partial
import hoomd._hoomd as _hoomd
def _make_vec3(vec, vec_factory, scalar_type):
"""Converts Python types to HOOMD T3 classes (e.g. Scalar3, Int3).
Args:
vec (Sequence[T] or T): A sequence or scalar of type ``scalar_type``.
vec_factory (function): A function from `hoomd._hoomd` that makes a T3
class (e.g. Scalar3, Int3).
scalar_type (class): A class defining the base type ``T`` for the
vec_factory function. For `Scalar3` this would be `float`.
"""
try:
l_vec = len(vec)
except TypeError:
try:
v = scalar_type(vec)
except (ValueError, TypeError):
raise ValueError("Expected value of type {}.".format(scalar_type))
else:
return vec_factory(v, v, v)
if l_vec == 3:
try:
return vec_factory(scalar_type(vec[0]), scalar_type(vec[1]),
scalar_type(vec[2]))
except (ValueError, TypeError):
raise ValueError("Expected values of type {}.".format(scalar_type))
else:
raise ValueError("Expected a sequence of three values or a single "
"value. Received {} values.".format(len(vec)))
# Module-level partial converters: each binds _make_vec3 to one concrete
# HOOMD vector factory and its element type.
_make_scalar3 = partial(_make_vec3,
                        vec_factory=_hoomd.make_scalar3,
                        scalar_type=float)
_make_int3 = partial(_make_vec3, vec_factory=_hoomd.make_int3, scalar_type=int)
# char3 components are small integers; Python has no char type, so int is
# used for conversion here as well.
_make_char3 = partial(_make_vec3,
                      vec_factory=_hoomd.make_char3,
                      scalar_type=int)
def _vec3_to_array(vec, dtype=None):
return np.array((vec.x, vec.y, vec.z), dtype=dtype)
class Box:
    """Define box dimensions.

    Args:
        Lx (float): box extent in the x direction :math:`[\\mathrm{length}]`.
        Ly (float): box extent in the y direction :math:`[\\mathrm{length}]`.
        Lz (float): box extent in the z direction :math:`[\\mathrm{length}]`.
        xy (float): tilt factor xy :math:`[\\mathrm{dimensionless}]`.
        xz (float): tilt factor xz :math:`[\\mathrm{dimensionless}]`.
        yz (float): tilt factor yz :math:`[\\mathrm{dimensionless}]`.

    Simulation boxes in hoomd are specified by six parameters, ``Lx``, ``Ly``,
    ``Lz``, ``xy``, ``xz``, and ``yz``. `Box` provides a way to specify all
    six parameters for a given box and perform some common operations with
    them. A `Box` can be passed to an initialization method or assigned to a
    saved :py:class:`State` variable (``state.box = new_box``) to set the
    simulation box.

    Access attributes directly::

        box = hoomd.Box.cube(L=20)
        box.xy = 1.0
        box.yz = 0.5
        box.Lz = 40

    .. rubric:: Two dimensional systems

    2D simulations in HOOMD use boxes with ``Lz == 0``. 2D boxes ignore ``xz``
    and ``yz``. If a new `Box` is assigned to a system with different
    dimensionality, a warning will be shown.

    In 2D boxes, *volume* is in units of area.

    .. rubric:: Factory Methods

    `Box` has factory methods to enable easier creation of boxes: `cube`,
    `square`, `from_matrix`, and `from_box`. See the method documentation for
    usage.

    Examples:
        * Cubic box with given length: ``hoomd.Box.cube(L=1)``
        * Square box with given length: ``hoomd.Box.square(L=1)``
        * From an upper triangular matrix: ``hoomd.Box.from_matrix(matrix)``
        * Specify values: ``hoomd.Box(Lx=1., Ly=2., Lz=3., xy=1., xz=2., yz=3.)``
    """

    # Constructors
    def __init__(self, Lx, Ly, Lz=0, xy=0, xz=0, yz=0):
        if Lz == 0 and (xz != 0 or yz != 0):
            raise ValueError("Cannot set the xz or yz tilt factor on a 2D box.")
        self._cpp_obj = _hoomd.BoxDim(Lx, Ly, Lz)
        self._cpp_obj.setTiltFactors(xy, xz, yz)

    @classmethod
    def cube(cls, L):
        """Create a cube with side lengths ``L``.

        Args:
            L (float): The box side length :math:`[\\mathrm{length}]`.

        Returns:
            hoomd.Box: The created 3D box.
        """
        return cls(L, L, L, 0, 0, 0)

    @classmethod
    def square(cls, L):
        """Create a square with side lengths ``L``.

        Args:
            L (float): The box side length :math:`[\\mathrm{length}]`.

        Returns:
            hoomd.Box: The created 2D box.
        """
        return cls(L, L, 0, 0, 0, 0)

    @classmethod
    def from_matrix(cls, box_matrix):
        """Create a box from an upper triangular matrix.

        Args:
            box_matrix ((3, 3) `numpy.ndarray` of `float`): An upper
                triangular matrix representing a box. The values for ``Lx``,
                ``Ly``, ``Lz``, ``xy``, ``xz``, and ``yz`` are related to the
                matrix by the following expressions.

                .. code-block:: python

                    [[Lx, Ly * xy, Lz * xz],
                     [0,  Ly,      Lz * yz],
                     [0,  0,       Lz]]

        Returns:
            hoomd.Box: The created box.
        """
        b = cls(0, 0)
        b.matrix = box_matrix
        return b

    @classmethod
    def _from_cpp(cls, cpp_obj):
        """Wrap a C++ BoxDim.

        Does not copy the C++ object.
        """
        b = Box(0, 0)
        b._cpp_obj = cpp_obj
        return b

    @classmethod
    def from_box(cls, box):
        R"""Initialize a Box instance from a box-like object.

        Args:
            box:
                A box-like object

        Note:
            Objects that can be converted to HOOMD-blue boxes include lists
            like ``[Lx, Ly, Lz, xy, xz, yz]``, dictionaries with keys
            ``'Lx', 'Ly', 'Lz', 'xy', 'xz', 'yz'``, objects with attributes
            ``Lx, Ly, Lz, xy, xz, yz``, 3x3 matrices (see `from_matrix`), or
            existing `hoomd.Box` objects.

            If any of ``Lz, xy, xz, yz`` are not provided, they will be set
            to 0.

            If all values are provided, a triclinic box will be constructed.
            If only ``Lx, Ly, Lz`` are provided, an orthorhombic box will
            be constructed. If only ``Lx, Ly`` are provided, a rectangular
            (2D) box will be constructed.

        Returns:
            :class:`hoomd.Box`: The resulting box object.
        """
        if np.asarray(box).shape == (3, 3):
            # Handles 3x3 matrices
            return cls.from_matrix(box)
        try:
            # Handles hoomd.box.Box and objects with attributes
            Lx = box.Lx
            Ly = box.Ly
            Lz = getattr(box, 'Lz', 0)
            xy = getattr(box, 'xy', 0)
            xz = getattr(box, 'xz', 0)
            yz = getattr(box, 'yz', 0)
        except AttributeError:
            try:
                # Handle dictionary-like
                Lx = box['Lx']
                Ly = box['Ly']
                Lz = box.get('Lz', 0)
                xy = box.get('xy', 0)
                xz = box.get('xz', 0)
                yz = box.get('yz', 0)
            except (IndexError, KeyError, TypeError):
                if len(box) not in (2, 3, 6):
                    # Bug fix: message previously named freud.box.Box.
                    raise ValueError(
                        "List-like objects must have length 2, 3, or 6 to be "
                        "converted to hoomd.Box.")
                # Handle list-like
                Lx = box[0]
                Ly = box[1]
                Lz = box[2] if len(box) > 2 else 0
                xy, xz, yz = box[3:6] if len(box) == 6 else (0, 0, 0)
        except:  # noqa - deliberately re-raise anything unexpected
            raise
        return cls(Lx=Lx, Ly=Ly, Lz=Lz, xy=xy, xz=xz, yz=yz)

    # Dimension based properties
    @property
    def dimensions(self):
        """int: The dimensionality of the box.

        If ``Lz == 0``, the box is treated as 2D, otherwise it is 3D. This
        property is not settable.
        """
        return 2 if self.is2D else 3

    @property
    def is2D(self):  # noqa: N802 - allow function name
        """bool: Flag whether the box is 2D.

        If ``Lz == 0``, the box is treated as 2D, otherwise it is 3D. This
        property is not settable.
        """
        return self.Lz == 0

    # Length based properties
    @property
    def L(self):  # noqa: N802 - allow function name
        """(3, ) `numpy.ndarray` of `float`: The box lengths, \
        ``[Lx, Ly, Lz]`` :math:`[\\mathrm{length}]`.

        Can be set with a float which sets all lengths, or a length 3 vector.
        """
        return _vec3_to_array(self._cpp_obj.getL())

    @L.setter
    def L(self, new_L):  # noqa: N802: Allow function name
        newL = _make_scalar3(new_L)
        if newL.z == 0 and not self.is2D:
            # Collapsing to 2D: xz and yz are meaningless there, zero them.
            self.tilts = [self.xy, 0, 0]
        self._cpp_obj.setL(newL)

    @property
    def Lx(self):  # noqa: N802: Allow function name
        """float: The length of the box in the x dimension \
        :math:`[\\mathrm{length}]`."""
        return self.L[0]

    @Lx.setter
    def Lx(self, value):  # noqa: N802: Allow function name
        L = self.L
        L[0] = float(value)
        self.L = L

    @property
    def Ly(self):  # noqa: N802: Allow function name
        """float: The length of the box in the y dimension \
        :math:`[\\mathrm{length}]`."""
        return self.L[1]

    @Ly.setter
    def Ly(self, value):  # noqa: N802: Allow function name
        L = self.L
        L[1] = float(value)
        self.L = L

    @property
    def Lz(self):  # noqa: N802: Allow function name
        """float: The length of the box in the z dimension \
        :math:`[\\mathrm{length}]`."""
        return self.L[2]

    @Lz.setter
    def Lz(self, value):  # noqa: N802: Allow function name
        L = self.L
        L[2] = float(value)
        self.L = L

    # Box tilt based properties
    @property
    def tilts(self):
        """(3, ) `numpy.ndarray` of `float`: The box tilts, ``[xy, xz, yz]``.

        Can be set using one tilt for all axes or three tilts. If the box is
        2D ``xz`` and ``yz`` will automatically be set to zero.
        """
        return np.array([self.xy, self.xz, self.yz])

    @tilts.setter
    def tilts(self, new_tilts):
        new_tilts = _make_scalar3(new_tilts)
        if self.is2D and (new_tilts.y != 0 or new_tilts.z != 0):
            raise ValueError("Cannot set the xz or yz tilt factor on a 2D box.")
        self._cpp_obj.setTiltFactors(new_tilts.x, new_tilts.y, new_tilts.z)

    @property
    def xy(self):
        """float: The tilt for the xy plane."""
        return self._cpp_obj.getTiltFactorXY()

    @xy.setter
    def xy(self, xy):
        self.tilts = [xy, self.xz, self.yz]

    @property
    def xz(self):
        """float: The tilt for the xz plane."""
        return self._cpp_obj.getTiltFactorXZ()

    @xz.setter
    def xz(self, xz):
        if self.is2D:
            raise ValueError("Cannot set xz tilt factor on a 2D box.")
        self.tilts = [self.xy, xz, self.yz]

    @property
    def yz(self):
        """float: The tilt for the yz plane."""
        return self._cpp_obj.getTiltFactorYZ()

    @yz.setter
    def yz(self, yz):
        if self.is2D:
            raise ValueError("Cannot set yz tilt factor on a 2D box.")
        self.tilts = [self.xy, self.xz, yz]

    # Misc. properties
    @property
    def periodic(self):
        """(3, ) `numpy.ndarray` of `bool`: The periodicity of each \
        dimension."""
        return _vec3_to_array(self._cpp_obj.getPeriodic(), bool)

    @property
    def lattice_vectors(self):
        """(3, 3) `numpy.ndarray` of `float`: Box lattice vectors.

        The lattice vectors are read-only.
        """
        return np.concatenate([
            _vec3_to_array(self._cpp_obj.getLatticeVector(i)) for i in range(3)
        ]).reshape(3, 3)

    @property
    def volume(self):
        """float: Volume of the box.

        :math:`[\\mathrm{length}^{2}]` in 2D and
        :math:`[\\mathrm{length}^{3}]` in 3D.

        When setting volume the aspect ratio of the box is maintained while
        the lengths are changed.
        """
        return self._cpp_obj.getVolume(self.is2D)

    @volume.setter
    def volume(self, volume):
        self.scale((volume / self.volume)**(1 / self.dimensions))

    @property
    def matrix(self):
        """(3, 3) `numpy.ndarray` `float`: The upper triangular matrix that \
        defines the box.

        Can be used to set the box to one defined by an upper triangular
        matrix.

        .. code-block:: python

            [[Lx, Ly * xy, Lz * xz],
             [0,  Ly,      Lz * yz],
             [0,  0,       Lz]]
        """
        Lx, Ly, Lz = self.L
        xy, xz, yz = self.tilts
        return np.array([[Lx, xy * Ly, xz * Lz], [0, Ly, yz * Lz], [0, 0, Lz]])

    @matrix.setter
    def matrix(self, box_matrix):
        box_matrix = np.asarray(box_matrix)
        # Validate the shape first: np.triu on a non-3x3 array would give a
        # confusing error before the shape message could be raised.
        if box_matrix.shape != (3, 3):
            raise ValueError("Box matrix must be a 3x3 matrix.")
        if not np.allclose(box_matrix, np.triu(box_matrix)):
            raise ValueError("Box matrix must be upper triangular.")
        L = np.diag(box_matrix)
        self.L = L
        self.xy = box_matrix[0, 1] / L[1]
        # Bug fix: for a 2D box (Lz == 0) the old code divided by zero,
        # producing inf/nan and then a spurious ValueError from the xz/yz
        # setters. A 2D box has no xz/yz tilts; leave them at zero.
        if L[2] != 0:
            self.xz = box_matrix[0, 2] / L[2]
            self.yz = box_matrix[1, 2] / L[2]

    def scale(self, s):
        R"""Scale box dimensions.

        Scales the box in place by the given scale factors. Tilt factors are
        not modified.

        Args:
            s (float or list[float]): scale factors in each dimension. If a
                single float is given then scale all dimensions by s;
                otherwise, s must be a sequence of 3 values used to scale
                each dimension.

        Returns:
            ``self``
        """
        s = np.asarray(s, dtype=float)
        self.L *= s
        return self

    # Magic Methods
    def __repr__(self):
        """Executable representation of the object."""
        return "hoomd.box.Box(Lx={}, Ly={}, Lz={}, xy={}, xz={}, yz={})".format(
            self.Lx, self.Ly, self.Lz, self.xy, self.xz, self.yz)

    def __eq__(self, other):
        """Test if boxes are equal."""
        if not isinstance(other, Box):
            return NotImplemented
        return self._cpp_obj == other._cpp_obj

    def __ne__(self, other):
        """Test if boxes are not equal.

        Bug fix: this was previously (mis)named ``__neq__``, which is not a
        Python special method, so ``!=`` never invoked it.
        """
        if not isinstance(other, Box):
            return NotImplemented
        return self._cpp_obj != other._cpp_obj

    # Backwards-compatible alias for any code that called the old name
    # directly.
    __neq__ = __ne__

    def __reduce__(self):
        """Reduce values to picklable format."""
        return (type(self), (*self.L, *self.tilts))
| joaander/hoomd-blue | hoomd/box.py | Python | bsd-3-clause | 14,679 | [
"HOOMD-blue"
] | e952d2fe527253887d594a4a5b8cd6630d05ef2dd108ac71750f2ec35db873b6 |
import argparse
from math import pi as PI, ceil
from pathlib import Path
import sys
from pysisyphus.constants import AMU2KG
from pysisyphus.helpers import geom_loader
from pysisyphus.helpers_pure import get_input, highlight_text
from pysisyphus.io.pdb import geom_to_pdb_str
from pysisyphus.wrapper.packmol import make_input, call_packmol
from pysisyphus.db import LEVELS, MOLECULES
from pysisyphus.db.helpers import get_path as db_get_path
# Unit-conversion factors.
# Grams per atomic mass unit (AMU2KG is kg per amu; *1e3 converts kg -> g).
AMU2G = AMU2KG * 1e3
# Centimeters per ångström (1 Å = 1e-8 cm); used to convert g/cm³ -> amu/ų.
CM2ANG = 1e-8
def parse_args(args):
    """Parse the command-line options for the solvation-box helper.

    :param args: Sequence of raw argument strings, e.g. ``sys.argv[1:]``.
    :return: ``argparse.Namespace`` holding the parsed options.
    """
    prs = argparse.ArgumentParser()

    # Solvent source: either an explicit geometry file or the internal DB.
    solv_src = prs.add_mutually_exclusive_group(required=True)
    solv_src.add_argument("--solv", help="Filename of solvent geometry.")
    solv_src.add_argument("--db", action="store_true",
                          help="Choose from internal database.")
    prs.add_argument("--solv_num", type=int,
                     help="Number of solvent molecules to pack.")
    prs.add_argument("--solv_dens", type=float, help="Solvent density in g/cm³.")
    prs.add_argument("--output", default="output.pdb",
                     help="Filename of packed molecules.")

    # Optional solute.
    prs.add_argument("--solute", default=None, help="Filename of solute geometry.")
    prs.add_argument("--solute_num", type=int, default=1,
                     help="Number of solute molecules to pack.")

    return prs.parse_args(args)
def as_pdb(fn):
    """Return a path to a PDB version of the geometry file ``fn``.

    If ``fn`` already has a ``.pdb`` suffix it is returned unchanged;
    otherwise the geometry is loaded, converted, and written as a PDB file
    of the same basename in the current working directory.

    Bug fix: the original fell through to ``return pdb_fn`` even when no
    conversion happened, leaving ``pdb_fn`` unbound for ``.pdb`` inputs.

    :param fn: Path (str or Path) to a geometry file.
    :return: Path to a PDB file.
    """
    if str(fn).endswith(".pdb"):
        return fn
    geom = geom_loader(fn)
    pdb_str = geom_to_pdb_str(geom)
    # Write next to the current working directory, not the source file.
    pdb_fn = Path(".") / Path(fn).with_suffix(".pdb").name
    with open(pdb_fn, "w") as handle:
        handle.write(pdb_str)
    print(f"Converted '{fn}' to PDB format ('{pdb_fn}')")
    return pdb_fn
def sphere_radius_from_volume(volume):
    """Radius of a sphere with the given volume (V = 4/3 * pi * r**3)."""
    radius = (3 / 4 * volume / PI) ** (1 / 3)
    return radius
def volume_for_density(molecule_num, mol_mass, density):
    """Volume in ų occupied by ``molecule_num`` molecules at ``density``.

    :param molecule_num: Number of molecules.
    :param mol_mass: Molecular mass in amu (numerically equal to g/mol).
    :param density: Density in g/cm³.
    :return: Volume in ų.
    """
    # g/cm³ -> amu/ų; the molar mass in g/mol equals the mass in amu,
    # so no Avogadro factor is needed.
    density_amu_per_ang3 = density / AMU2G * CM2ANG ** 3
    total_mass_amu = mol_mass * molecule_num
    return total_mass_amu / density_amu_per_ang3
def print_info(title, geom):
    """Print a short summary (geometry repr and molar mass) for a molecule."""
    summary = [
        title,
        f"\t{geom}",
        f"\tMolar mass: {geom.total_mass:.2f} g mol⁻¹",
        "",
    ]
    print("\n".join(summary))
def run():
    """Entry point: build a packmol input for a solvated sphere and run it.

    Reads options from ``sys.argv``, optionally asks the user interactively
    for a solvent from the internal database, estimates the sphere radius
    from the requested density, writes ``packmol.inp``/``packmol.log`` and
    returns packmol's exit code.
    """
    args = parse_args(sys.argv[1:])

    # Optional solute: contributes to the total mass/volume estimate.
    solute_fn = args.solute
    if solute_fn:
        solute = geom_loader(solute_fn)
        solute_num = args.solute_num
        solute_mass = solute.total_mass
        print_info("Solute", solute)
    else:
        solute = None
        solute_mass = 0.0

    solv_fn = args.solv
    if solv_fn:
        solv_dens = args.solv_dens
    else:
        # Load from internal db (interactive selection).
        print(highlight_text("Interactive solvent selection"))
        level = get_input(LEVELS, "Level of theory", lbl_func=lambda lvl: lvl[0])
        print()
        molecule = get_input(MOLECULES, "Molecule", lbl_func=lambda mol: mol.name)
        print()
        solv_fn = db_get_path(molecule.name, level[0])
        solv_dens = molecule.density

    solv = geom_loader(solv_fn)
    solv_num = args.solv_num
    solv_mass = solv.total_mass
    print_info("Solvent", solv)

    solute_solv_mass = solute_mass + solv_num * solv_mass
    print(f"Total mass of solute(s) and solvent(s): {solute_solv_mass:.2f} amu")
    print()

    # Solvent volume
    solv_vol = volume_for_density(solv_num, solv_mass, solv_dens)
    print(f"Solvent volume: {solv_vol:>10.2f} ų")
    # Solute volume; Use the solvent density for this calculation
    if solute:
        solute_vol = volume_for_density(solute_num, solute_mass, solv_dens)
        print(f" Solute volume: {solute_vol:>10.2f} ų")
    else:
        solute_vol = 0.0
    total_vol = solv_vol + solute_vol
    print(f" Total volume: {total_vol:>10.2f} ų")
    print()

    # Sphere radius that holds the estimated volume; rounded up so packmol
    # never gets a sphere that is slightly too small.
    radius = sphere_radius_from_volume(total_vol)
    print(f" Sphere radius: {radius:>8.2f} Å")
    cradius = ceil(radius)
    print(f"Using ceil(radius): {cradius:>8.2f} Å")
    print()

    # Create solute/solvent PDBs if needed (packmol consumes PDB files).
    inp_kwargs = {
        "output_fn": args.output,
        "solvent_fn": as_pdb(solv_fn),
        "solvent_num": solv_num,
        "sphere_radius": cradius,
    }
    if solute:
        inp_kwargs.update({"solute_fn": as_pdb(solute_fn), "solute_num": solute_num})
    inp = make_input(**inp_kwargs)
    inp_fn = "packmol.inp"
    with open(inp_fn, "w") as handle:
        handle.write(inp)
    print(f"Wrote packmol input to '{inp_fn}'")

    proc = call_packmol(inp)
    log_fn = "packmol.log"
    with open(log_fn, "w") as handle:
        handle.write(proc.stdout)
    # NOTE(review): "ouput" typo in the user-facing message below — left
    # untouched here because this edit only adds documentation.
    print(f"Wrote packmol ouput to '{log_fn}'")
    print()
    return_ = proc.returncode
    if return_ != 0:
        # On failure, surface packmol's stdout so the user sees the error.
        print(proc.stdout)
    else:
        print("packmol returned successfully!")
    return return_
| eljost/pysisyphus | pysisyphus/pack.py | Python | gpl-3.0 | 4,954 | [
"Dalton"
] | dda653fd5eb07e20a8c91db6f448e100cae879869359e6f142851a58cf2fa6d8 |
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, unicode_literals,
print_function)
import json
import logging
import os
import re
import subprocess
import warnings
import dlib
import numpy
import pathlib2
import six
import skimage
import skimage.color
import skimage.exposure
import skimage.feature
import skimage.io
import skimage.transform
import zbar
from PIL import Image
from pycolorname.pantone.pantonepaint import PantonePaint
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from file_metadata.generic_file import GenericFile
from file_metadata.utilities import (DictNoNone, app_dir, bz2_decompress,
download, to_cstr, memoized, DATA_PATH)
# A "decompression bomb" is a small compressed image file which, when
# decompressed, uses a huge amount of RAM — for example, a monochrome PNG
# with 100k x 100k pixels. Escalate PIL's warning about such files into an
# error so they are rejected instead of exhausting memory.
warnings.simplefilter('error', Image.DecompressionBombWarning)
class ImageFile(GenericFile):
    """Base class for image files: shared fetch/analysis routines for
    raster formats; format-specific subclasses (JPEG, TIFF, XCF, SVG)
    are chosen by :meth:`create`."""

    # Mimetypes handled directly by this class; empty here, subclasses
    # narrow this down.
    mimetypes = ()
def config(self, key, new_defaults=()):
defaults = {
"max_decompressed_size": int(1024 ** 3 / 4 / 3) # In bytes
}
defaults.update(dict(new_defaults)) # Update the defaults from child
return super(ImageFile, self).config(key, new_defaults=defaults)
@classmethod
def create(cls, *args, **kwargs):
cls_file = cls(*args, **kwargs)
mime = cls_file.mime()
_type, subtype = mime.split('/', 1)
if mime == 'image/jpeg':
from file_metadata.image.jpeg_file import JPEGFile
return JPEGFile.create(*args, **kwargs)
elif _type in ('image', 'application') and subtype == 'x-xcf':
from file_metadata.image.xcf_file import XCFFile
return XCFFile.create(*args, **kwargs)
elif mime == 'image/tiff':
from file_metadata.image.tiff_file import TIFFFile
return TIFFFile.create(*args, **kwargs)
elif cls_file.is_type('svg'):
from file_metadata.image.svg_file import SVGFile
return SVGFile.create(*args, **kwargs)
return cls(*args, **kwargs)
def is_type(self, key):
if key == 'alpha':
return self.fetch('pillow').mode in ('LA', 'RGBA')
return super(ImageFile, self).is_type(key)
    @memoized
    def fetch(self, key=''):
        """Fetch (and memoize) a derived representation of the image.

        Supported keys (others fall through to the parent class):
          - ``filename_raster``: path to a raster version of the file.
          - ``filename_zxing``: ``file://`` URI of the raster file (zxing).
          - ``ndarray``: image pixels as a numpy array.
          - ``ndarray_grey``: 8-bit greyscale version of ``ndarray``.
          - ``ndarray_hsv``: 8-bit HSV version (alpha removed first).
          - ``ndarray_noalpha``: ``ndarray`` with alpha blended away.
          - ``pillow``: the PIL Image object (registered for closing).
        """
        if key == 'filename_raster':
            # A raster filename holds the file in a raster graphic format
            return self.fetch('filename')
        elif key == 'filename_zxing':
            return pathlib2.Path(self.fetch('filename_raster')).as_uri()
        elif key == 'ndarray':
            # Honor the configured decompression-bomb limit before reading.
            Image.MAX_IMAGE_PIXELS = self.config('max_decompressed_size')
            try:
                image_array = skimage.io.imread(self.fetch('filename_raster'))
                if image_array.shape == (2,):
                    # Assume this is related to
                    # https://github.com/scikit-image/scikit-image/issues/2154
                    return image_array[0]
                return image_array
            except Image.DecompressionBombWarning:
                logging.warn('The file "{0}" contains a lot of pixels and '
                             'can take a lot of memory when decompressed. '
                             'To allow larger images, modify the '
                             '"max_decompressed_size" config.'
                             .format(self.fetch('filename')))
                # Use empty array as the file cannot be read.
                return numpy.ndarray(0)
        elif key == 'ndarray_grey':
            # Suppress precision-loss warnings from the ubyte conversion.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                return skimage.img_as_ubyte(
                    skimage.color.rgb2grey(self.fetch('ndarray')))
        elif key == 'ndarray_hsv':
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                return skimage.img_as_ubyte(
                    skimage.color.rgb2hsv(self.fetch('ndarray_noalpha')))
        elif key == 'ndarray_noalpha':
            if self.is_type('alpha'):
                return self.alpha_blend(self.fetch('ndarray'))
            return self.fetch('ndarray')
        elif key == 'pillow':
            pillow_img = Image.open(self.fetch('filename_raster'))
            # Register so the file handle is closed with this object.
            self.closables.append(pillow_img)
            return pillow_img
        return super(ImageFile, self).fetch(key)
@staticmethod
def alpha_blend(img, background=255):
"""
Take an image, assume the last channel is a alpha channel and remove it
by using the appropriate background.
:param img: The image to alpha blend into given background.
:param background: The background color to use when alpha blending.
A scalar is expected, which is used for all
the channels.
"""
alpha = img[..., -1] / 255.0
channels = img[..., :-1]
new_img = numpy.zeros_like(channels)
for ichan in range(channels.shape[-1]):
new_img[..., ichan] = numpy.clip(
(1 - alpha) * background + alpha * channels[..., ichan],
a_min=0, a_max=255)
return new_img
def analyze_geolocation(self, use_nominatim=True):
"""
Find the location where the photo was taken initially. This is
information which is got using the latitude/longitude in EXIF data.
:param use_nominatim: Whether to use reverse geocoding from nominatim
or not.
:return: dict with the keys:
- Composite:Country - The country the photo was taken.
- Composite:City - The city the photo was taken.
"""
exif = self.exiftool()
data = {}
def dms2dec(dms_str, sign=None):
"""
Return decimal representation of DMS string: DDD deg MM' SS.SS"
"""
dms_regex = r'(?P<deg>-?\d+) deg (?P<min>\d+)\' (?P<sec>\d+\.\d+)"'
dms = re.match(dms_regex, dms_str.strip().lower()).groups()
_deg, _min, _sec = map(float, dms)
dec = _deg + _min / 60 + _sec / 3600
if '-' in dms_str: # Use negative sign if given
return dec
elif re.search('[sw]', dms_str.lower()): # Use S/W if given
return -dec
elif ((isinstance(sign, (int, float)) and sign > 0) or
(isinstance(sign, six.string_types) and
sign.strip().lower().startswith('n'))):
return dec
elif ((isinstance(sign, (int, float)) and sign < 0) or
(isinstance(sign, six.string_types) and
sign.strip().lower().startswith('s'))):
return -dec
return dec
lat, lon = None, None
for grp in ('EXIF', 'XMP'):
lat_ref = exif.get(grp + ':GPSLatitudeRef', '')
lon_ref = exif.get(grp + ':GPSLongitudeRef', '')
lat_dms = exif.get(grp + ':GPSLatitude', '')
lon_dms = exif.get(grp + ':GPSLongitude', '')
if not (lat_dms and lon_dms):
continue
lat, lon = dms2dec(lat_dms, lat_ref), dms2dec(lon_dms, lon_ref)
if lat is None or lon is None:
return {}
data = DictNoNone({'Composite:GPSLatitude': lat,
'Composite:GPSLongitude': lon})
if use_nominatim:
# Zoom levels: country = 0, megacity = 10, district = 10,
# city = 13, village = 15, street = 16, house = 18
url = ('http://nominatim.openstreetmap.org/reverse?format=json'
'&accept-language=en&lat={lat}&lon={lon}&zoom={zoom}'
.format(lat=lat, lon=lon, zoom=13))
try:
response = urlopen(url)
location = json.loads(response.read().decode('utf-8'))
except URLError as err:
logging.warn('An issue occured while querying nominatim '
'with: ' + url)
logging.exception(err)
return data
if isinstance(location, list) and len(location) == 0:
return data # No location found
addr = location.get('address', {})
data['Composite:GPSCountry'] = addr.get('country')
data['Composite:GPSState'] = addr.get('state')
data['Composite:GPSCity'] = addr.get('city')
return data
    def analyze_color_calibration_target(self):
        """
        Find whether there is a color calibration strip on top of the image.

        Looks for an IT8-style bar of 15-25 distinct patches in a thin
        horizontal band near the top and bottom of the image. Returns a dict
        with ``Color:IT8TopBar`` / ``Color:IT8BottomBar`` (patch counts) and
        the corresponding ``...GreyMSE`` values when a bar is detected.
        """
        grey_array = self.fetch('ndarray_grey')
        image_array = self.fetch('ndarray')
        if grey_array is None:
            return {}
        # For the images we're testing, the IT8 bar takes about 20% of the
        # image and also in the 20% we need the mid area
        bary = int(0.2 * grey_array.shape[0])

        def bar_intensity(x):
            # Average a thin horizontal slice (10% of the band, >= 2 rows)
            # through the vertical middle of the band, per column.
            sampley = max(int(0.1 * x.shape[0]), 2)
            return numpy.mean(
                x[(x.shape[0] - sampley) // 2:(x.shape[0] + sampley) // 2,
                  :, ...],
                axis=0)

        topbar = bar_intensity(grey_array[:bary, :, ...])
        botbar = bar_intensity(grey_array[-bary:, :, ...])

        def _merge_near(arr):
            # Collapse runs of equal neighboring values into a single entry.
            out = []
            last_elem = arr[0]
            out.append(last_elem)
            for elem in arr[1:]:
                if elem != last_elem:
                    out.append(elem)
                    last_elem = elem
            return numpy.asarray(out)

        # Bottom bars seem to have smaller intensity because of the background
        # Hence, we set a smaller threshold for peaks in bottom bars.
        bot_spikes = _merge_near((numpy.diff(botbar)) > -2.5).sum()
        top_spikes = _merge_near((numpy.diff(topbar)) < 3).sum()

        # Mean square error of the non-bar region against its greyscale
        # equivalent, averaged over the color channels (0 for greyscale
        # images, which have ndim == 2).
        top_grey_mse, bot_grey_mse = 0, 0
        if image_array.ndim == 3:
            for chan in range(image_array.shape[2]):
                top_grey_mse += (
                    (image_array[bary:, :, chan] -
                     grey_array[bary:]) ** 2).mean()
                bot_grey_mse += (
                    (image_array[-bary, :, chan] -
                     grey_array[-bary]) ** 2).mean()
            # NOTE(review): assumes 3 color channels; an RGBA image would be
            # averaged over the wrong divisor — confirm upstream guarantees.
            top_grey_mse /= 3.0
            bot_grey_mse /= 3.0

        data = {}
        # 15-25 distinct patches is the expected IT8 patch-count window.
        if 15 < top_spikes < 25:
            data['Color:IT8TopBar'] = top_spikes
            data['Color:IT8TopBarGreyMSE'] = top_grey_mse
        if 15 < bot_spikes < 25:
            data['Color:IT8BottomBar'] = bot_spikes
            data['Color:IT8BottomBarGreyMSE'] = bot_grey_mse
        return data
def analyze_stereo_card(self):
    """
    Find whether the given image is a stereo card or not.

    Crops 10% off every edge, splits the remainder into left and right
    halves, and returns two similarity scores: a pixelwise mean square
    error and a mean square error between the halves' histograms.  Low
    values suggest the two halves are near-duplicates (a stereo pair).
    """
    image_array = self.fetch('ndarray_grey')
    if image_array is None:
        return {}

    def _full_histogram(img):
        # 255-bin intensity histogram over the whole array.
        return numpy.histogram(img, bins=range(256))[0]

    h, w = image_array.shape[:2]
    # Remove corners as that's probably the edges and gradient etc.
    roi = image_array[int(0.1 * h):int(0.9 * h),
                      int(0.1 * w):int(0.9 * w), ...]
    _, width = roi.shape[:2]
    # For odd widths the middle column is excluded from both halves so
    # that ``left`` and ``right`` have equal shape.
    left = roi[:, :width // 2]
    right = roi[:, width // 2 + (width % 2):]
    # NOTE(review): if the greyscale array is an unsigned integer dtype,
    # ``left - right`` wraps around before squaring -- confirm the dtype
    # produced by fetch('ndarray_grey').
    mean_square_err = ((left - right) ** 2).mean()
    histogram_mse = (
        ((_full_histogram(left) - _full_histogram(right)) ** 2).mean() /
        left.size)
    return {'Misc:StereoCardMSE': mean_square_err,
            'Misc:StereoCardHistogramMSE': histogram_mse}
def analyze_color_info(self,
                       grey_shade_threshold=0.05,
                       freq_colors_threshold=0.1,
                       edge_ratio_gaussian_sigma=1):
    """
    Find the average RGB color of the image and compare with the existing
    Pantone color system to identify the color name.

    :param grey_shade_threshold:
        The threshold to select a grey shade in NumberOfGreyShades.
        Percent of the most frequent shade (Range from 0 to 1).
    :param freq_colors_threshold:
        The threshold to select a peak in PercentFrequentColors.
        Percent of the most frequent shade (Range from 0 to 1).
    :param edge_ratio_gaussian_sigma:
        The sigma to use in gaussian blurring in Canny edge detection
        for EdgeRatio.
    :return: dict with the keys:

        - Color:ClosestLabeledColorRGB - The closest RGB value of the
          color found in the Pantone color palette.
        - Color:ClosestLabeledColor - The name of the closest color
          found in the Pantone color palette.
        - Color:AverageRGB - The average RGB value of the image.
        - Color:NumberOfGreyShades - The number of grey shades that are
          present more than a threshold percent of the most popular
          greyscale in a greyscale image with intensities from 0 - 255.
        - Color:PercentFrequentColors - The ratio of the number of colors
          which occur frequently to the number of colors in the
          palette.
        - Color:EdgeRatio - The percentage of pixels in the picture where
          edges are found.
        - Color:MeanSquareErrorFromGrey - The mean square error of each
          pixel with respect to the greyscale equivalent image.
        - Color:UsesAlpha - True if the alpha channel is present and being
          used.
    """
    image_array = self.fetch('ndarray_noalpha')
    # Average color: animated frames are (frame, h, w, chan); static RGB
    # is (h, w, 3); greyscale is (h, w) and is replicated to 3 channels.
    if image_array.ndim == 4:  # Animated images
        mean_color = image_array.mean(axis=(0, 1, 2))
    elif image_array.ndim == 3 and image_array.shape[2] == 3:  # Static
        mean_color = image_array.mean(axis=(0, 1))
    elif image_array.ndim == 2:  # Greyscale images
        avg = image_array.mean()
        mean_color = (avg, avg, avg)
    else:
        msg = ('Unsupported image type in "analyze_color_info()". '
               'Expected animated, greyscale, rgb, or rgba images. '
               'Found an image with {0} dimensions and shape {1}. '
               .format(image_array.ndim, image_array.shape))
        logging.warn(msg)
        return {}
    # Find the mean color and the closest color in the known palette
    closest_label, closest_color = PantonePaint().find_closest(mean_color)
    grey_array = self.fetch('ndarray_grey')

    def _full_histogram(img):
        # 255-bin intensity histogram over the whole array.
        return numpy.histogram(img, bins=range(256))[0]

    if image_array.ndim == 3 or image_array.ndim == 2:
        # Find the edge ratio by applying the canny filter and finding
        # bright spots. Not applicable to animated images.
        # Downscale so the longest side is ~500px before edge detection.
        scale = max(1.0, numpy.average(image_array.shape[:2]) / 500.0)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # NOTE(review): on Python 3, ``map`` returns an iterator --
            # confirm skimage accepts a non-sequence as output_shape.
            img_shape = map(lambda x: int(x / scale), grey_array.shape[:2])
            grey_img = skimage.transform.resize(grey_array,
                                                output_shape=img_shape,
                                                preserve_range=True)
            edge_img = skimage.feature.canny(grey_img,
                                             sigma=edge_ratio_gaussian_sigma)
        edge_ratio = (edge_img > 0).mean()
        # Find the number of grey shades in the image using the histogram.
        grey_hist = _full_histogram(grey_array)
        grey_hist_max = grey_shade_threshold * grey_hist.max()
        num_grey_shades = (grey_hist > grey_hist_max).sum()
    else:
        edge_ratio = None
        num_grey_shades = None
    # Find the peaks_percent using a histogram
    if image_array.ndim == 4:  # Animated images
        hist = {
            "red": _full_histogram(image_array[:, :, :, 0]),
            "green": _full_histogram(image_array[:, :, :, 1]),
            "blue": _full_histogram(image_array[:, :, :, 2])
        }
    elif image_array.ndim == 3 and image_array.shape[2] == 3:  # Static
        hist = {
            "red": _full_histogram(image_array[:, :, 0]),
            "green": _full_histogram(image_array[:, :, 1]),
            "blue": _full_histogram(image_array[:, :, 2])
        }
    elif image_array.ndim == 2:  # Greyscale images
        hist = {"grey": _full_histogram(image_array)}
    # Calculate peaks by finding the number of colors which occur
    # more than a given threshold. The threshold is chosen to be 1% of
    # the color that occurs most number of times.
    hist_concat = numpy.concatenate(tuple(hist.values()))
    peaks_hist_max = freq_colors_threshold * hist_concat.max()
    peaks_percent = (hist_concat > peaks_hist_max).mean()
    # Per-channel MSE against the greyscale image; 0 for an image that
    # is already greyscale, None for unsupported (animated) shapes.
    blackwhite_mean_square_err = None
    if image_array.ndim == 2:  # Greyscale images
        blackwhite_mean_square_err = 0
    elif image_array.ndim == 3:
        blackwhite_mean_square_err = 0
        for chan in range(image_array.shape[2]):
            blackwhite_mean_square_err += (
                (image_array[:, :, chan] - grey_array) ** 2).mean()
        blackwhite_mean_square_err /= image_array.shape[2]
    # Alpha is "used" when at least one pixel is not fully opaque.
    uses_alpha = None
    nd_array = self.fetch('ndarray')
    if (self.is_type('alpha') and nd_array.ndim == 3 and
            nd_array.shape[2] == 4):
        uses_alpha = (nd_array[:, :, 3] < 255).any()
    # DictNoNone drops keys whose value is None (unsupported metrics).
    return DictNoNone({
        'Color:ClosestLabeledColorRGB': closest_color,
        'Color:ClosestLabeledColor': closest_label,
        'Color:AverageRGB': tuple(round(i, 3) for i in mean_color),
        'Color:NumberOfGreyShades': num_grey_shades,
        'Color:PercentFrequentColors': peaks_percent,
        'Color:EdgeRatio': edge_ratio,
        'Color:MeanSquareErrorFromGrey': blackwhite_mean_square_err,
        'Color:UsesAlpha': uses_alpha})
@staticmethod
def _haarcascade(image, filename, directory=None, **kwargs):
    """
    Use OpenCV's haarcascade classifiers to detect certain features.

    :param image: Image to use when detecting with the haarcascade.
    :param filename: The file to create the CascadeClassifier with.
    :param directory: The directory of the haarcascade file.
    :param kwargs: Keyword args to pass to cascade's detectMultiScale().
    :return: List of rectangles of the detected objects. A rect
             is defined by an array with 4 values in the order:
             left, top, width, height.
    """
    warn_msg = ('HAAR Cascade analysis requires the optional dependencies '
                'OpenCV and opencv-data to be installed.')
    try:
        import cv2
    except ImportError:
        logging.warn(warn_msg)
        return []
    # Candidate locations of the cascade XML data, resolved relative to
    # the installed cv2 module (the directory name's case differs
    # between distributions).
    haar_paths = [
        os.path.abspath(os.path.join(
            os.path.realpath(cv2.__file__),
            *([os.pardir] * 4 + ['share', 'OpenCV', 'haarcascades']))),
        os.path.abspath(os.path.join(
            os.path.realpath(cv2.__file__),
            *([os.pardir] * 4 + ['share', 'opencv', 'haarcascades'])))]
    # Prefer an explicitly supplied directory; otherwise take the first
    # probed path that exists.
    for _dir in [directory] + haar_paths:
        if _dir is not None and os.path.exists(_dir):
            directory = _dir
            break
    if directory is None:
        logging.warn(warn_msg)
        return []
    # NOTE(review): a caller-supplied ``directory`` that does not exist
    # is still used here when none of the probed paths exist -- confirm
    # whether that should also warn and return [].
    cascade = cv2.CascadeClassifier(os.path.join(directory, filename),)
    features = cascade.detectMultiScale(image, **kwargs)
    return features
def analyze_face_haarcascades(self):
    """
    Use opencv's haar cascade filters to identify faces, right eye, left
    eye, upper body, etc..

    The greyscale image is downscaled and histogram-equalized, faces are
    detected with the frontal and profile cascades, and for each face a
    region of interest is searched for eyes (or glasses), ears, nose and
    mouth.  Feature positions are reported in original-image pixels.
    """
    try:
        import cv2  # noqa (unused import)
        from cv2 import cv
    except ImportError:
        logging.warn('HAAR Cascade analysis requires the optional '
                     'dependency OpenCV 2.x to be installed.')
        return {}
    image_array = self.fetch('ndarray_grey')
    if image_array.ndim == 3:
        logging.warn('Faces cannot be detected in animated images '
                     'using haarcascades yet.')
        return {}
    # The "scale" given here is relevant for the detection rate.
    scale = max(1.0, numpy.average(image_array.shape) / 500.0)
    # Equalize the histogram and make the size smaller
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        img_shape = map(lambda x: int(x / scale), image_array.shape)
        img = skimage.img_as_ubyte(
            skimage.exposure.equalize_hist(
                skimage.transform.resize(image_array,
                                         output_shape=img_shape,
                                         preserve_range=True)))

    def haar(im, key, single=False, **kwargs):
        # Run one named cascade over ``im``; ``single`` restricts the
        # search to the single most prominent match.
        cascades = {
            'frontal_face': 'haarcascade_frontalface_alt.xml',
            'profile_face': 'haarcascade_profileface.xml',
            'nested': 'haarcascade_eye_tree_eyeglasses.xml',
            'mouth': 'haarcascade_mcs_mouth.xml',
            'nose': 'haarcascade_mcs_nose.xml',
            'right_eye': 'haarcascade_righteye_2splits.xml',
            'left_eye': 'haarcascade_lefteye_2splits.xml',
            'left_ear': 'haarcascade_mcs_leftear.xml',
            'right_ear': 'haarcascade_mcs_rightear.xml',
            'upper_body': 'haarcascade_upperbody.xml',
            'lower_body': 'haarcascade_lowerbody.xml'}
        # Set some default kwargs
        kwargs['scaleFactor'] = kwargs.get('scaleFactor', 1.1)
        kwargs['minNeighbors'] = kwargs.get('minNeighbors', 2)
        kwargs['minSize'] = kwargs.get('minSize', (30, 30))
        flags = cv.CV_HAAR_SCALE_IMAGE
        if single:
            flags = (flags | cv.CV_HAAR_FIND_BIGGEST_OBJECT |
                     cv.CV_HAAR_DO_ROUGH_SEARCH)
        kwargs['flags'] = kwargs.get('flags', flags)
        return list(self._haarcascade(im, cascades[key], **kwargs))

    def drop_overlapping_regions(regions):
        # Keep only the largest of any set of mutually overlapping
        # rectangles (a face found by both cascades counts once).
        drop = set()
        # Sort regions by area (leftmost is smallest and dropped first)
        regions = sorted(regions, key=lambda x: x[-1] * x[-2])
        # overlap: Neither range is completely greater than the other
        overlap = (lambda x_min, x_width, y_min, y_width:
                   x_min <= y_min + y_width and y_min <= x_min + x_width)
        for i1, reg1 in enumerate(regions):
            for i2, reg2 in enumerate(regions[:i1]):
                if (i2 not in drop and
                        overlap(reg1[0], reg1[2], reg2[0], reg2[2]) and
                        overlap(reg1[1], reg1[3], reg2[1], reg2[3])):
                    drop.add(i2)
        for i, reg in enumerate(regions):
            if i not in drop:
                yield reg

    frontal = haar(img, 'frontal_face')
    profile = haar(img, 'profile_face')
    faces = list(drop_overlapping_regions(frontal + profile))
    if len(faces) == 0:
        return {}
    data = []
    for face in faces:
        # Report the face position scaled back to original-image pixels.
        scaled_face = list(map(lambda x: int(x * scale), face))
        fdata = {'position': {
            'left': scaled_face[0], 'top': scaled_face[1],
            'width': scaled_face[2], 'height': scaled_face[3]}}
        # Expand the face box by 1/8th on each side before searching for
        # the inner features.
        # NOTE(review): the width is clamped with img.shape[0] (rows)
        # and the height with img.shape[1] (columns) -- looks like x/y
        # may be swapped; confirm.
        roi = list(map(int, [
            max(0, face[0] - (face[2] / 8)),
            max(0, face[1] - (face[3] / 8)),
            min(img.shape[0], face[2] + (2 * face[2] / 8)),
            min(img.shape[1], face[3] + (2 * face[3] / 8))]))
        face_img = img[roi[1]:roi[1] + roi[3] - 1,
                       roi[0]:roi[0] + roi[2] - 1]

        def feat_mid(rect, offx, offy):
            # Center of a feature rectangle, translated from the face
            # ROI back into original-image coordinates.
            return (int(scale * (roi[0] + rect[0] + offx + rect[2] / 2)),
                    int(scale * (roi[1] + rect[1] + offy + rect[3] // 2)))

        # Eyes: search the upper half of the face; a single "nested"
        # (eye+glasses) pair implies glasses are worn.
        eye_img = face_img[:roi[3] // 2, :]
        nested = list(drop_overlapping_regions(haar(eye_img, 'nested')))
        if len(nested) == 2:
            nested = sorted(nested, key=lambda x: x[0])
            fdata['eyes'] = (feat_mid(nested[0], 0, 0),
                             feat_mid(nested[1], 0, 0))
            fdata['glasses'] = True
        else:
            eyes_found = []
            for eye in ['left_eye', 'right_eye']:
                eye_feats = haar(eye_img, eye, single=True)
                if len(eye_feats) == 1:
                    eyes_found.append(feat_mid(eye_feats[0], 0, 0))
            if len(eyes_found) > 0:
                fdata['eyes'] = tuple(eyes_found)
        # Ears: middle 3/4 vertical band of the face.
        ear_offy = roi[3] // 8
        ear_img = face_img[ear_offy:roi[3] * 7 // 8, :]
        ears_found = []
        for ear in ['left_ear', 'right_ear']:
            ear_feats = haar(ear_img, ear, single=True)
            if len(ear_feats) == 1:
                ears_found.append(feat_mid(ear_feats[0], 0, ear_offy))
        if len(ears_found) > 0:
            fdata['ears'] = tuple(ears_found)
        # Nose: central quarter-to-three-quarters window of the face.
        nose_offx, nose_offy = roi[2] // 4, roi[3] // 4
        nose_img = face_img[nose_offy:roi[3] * 3 // 4,
                            nose_offx:roi[2] * 3 // 4]
        nose_feats = haar(nose_img, 'nose', single=True)
        if len(nose_feats) == 1:
            fdata['nose'] = feat_mid(nose_feats[0], nose_offx, nose_offy)
        # Mouth: lower half of the face.
        mouth_offy = roi[3] // 2
        mouth_img = face_img[mouth_offy:, :]
        mouth_feats = haar(mouth_img, 'mouth', single=True)
        if len(mouth_feats) == 1:
            fdata['mouth'] = feat_mid(mouth_feats[0], 0, mouth_offy)
        data.append(fdata)
    return {'OpenCV:Faces': data}
def analyze_facial_landmarks(self,
                             with_landmarks=True,
                             detector_upsample_num_times=0):
    """
    Use ``dlib`` to find the facial landmarks and also detect pose.
    Note: It works only for frontal faces, not for profile faces, etc.

    :param with_landmarks:
        Whether to detect the facial landmarks or not. This also computes
        the location of the other facial features like the nose, mouth,
        and eyes.
    :param detector_upsample_num_times:
        The number of times to upscale the image by when detecting faces.
    :return: dict with the keys:

        - dlib:Faces - A dictionary with information about the face:

            - position - Dict with corner information having the keys
              left, right, top, bottom.
            - score - A score given on the probability of the given
              feature being a face.

          If the kwarg `with_landmarks` is provided, it also gives the
          following information:

            - nose - Location of the center of the nose.
            - left eye - Location of the center of the left eye.
            - right eye - Location of the center of the right eye.
            - mouth - Location of the center of the mouth.
    """
    image_array = self.fetch('ndarray_noalpha')
    # Only single-frame greyscale or 3-channel RGB arrays are supported.
    if (image_array.ndim == 4 or
            (image_array.ndim == 3 and image_array.shape[2] != 3)):
        logging.warn('Facial landmarks of animated images cannot be '
                     'detected yet.')
        return {}
    # The 68-point shape predictor data is downloaded once and cached
    # in the user data directory.
    predictor_dat = 'shape_predictor_68_face_landmarks.dat'
    predictor_arch = predictor_dat + '.bz2'
    dat_path = app_dir('user_data_dir', predictor_dat)
    arch_path = app_dir('user_data_dir', predictor_arch)
    if with_landmarks and not os.path.exists(dat_path):
        logging.warn('Downloading the landmark data file for facial '
                     'landmark detection. Hence, the '
                     'first run may take longer than normal.')
        url = 'http://sourceforge.net/projects/dclib/files/dlib/v18.10/{0}'
        download(url.format(predictor_arch), arch_path)
        bz2_decompress(arch_path, dat_path)
    detector = dlib.get_frontal_face_detector()
    # TODO: Get orientation data from ``orient_id`` and use it.
    faces, scores, orient_id = detector.run(
        image_array,
        upsample_num_times=detector_upsample_num_times)
    if len(faces) == 0:
        return {}
    if with_landmarks:
        predictor = dlib.shape_predictor(to_cstr(dat_path))
    data = []
    for face, score in zip(faces, scores):
        fdata = {
            'position': {'left': face.left(),
                         'top': face.top(),
                         'width': face.right() - face.left() + 1,
                         'height': face.bottom() - face.top() + 1},
            'score': score}
        # dlib's shape detector uses the ibug dataset to detect shape.
        # More info at: http://ibug.doc.ic.ac.uk/resources/300-W/
        if with_landmarks:
            shape = predictor(image_array, face)

            def tup(point):
                # dlib point -> (x, y) tuple.
                return point.x, point.y

            def tup2(pt1, pt2):
                # Midpoint of two dlib points as an (x, y) tuple.
                return int((pt1.x + pt2.x) / 2), int((pt1.y + pt2.y) / 2)

            # Point 34 is the tip of the nose
            fdata['nose'] = tup(shape.part(34))
            # Point 40 and 37 are the two corners of the left eye
            # Point 46 and 43 are the two corners of the right eye
            fdata['eyes'] = (tup2(shape.part(40), shape.part(37)),
                             tup2(shape.part(46), shape.part(43)))
            # Point 49 and 55 are the two outer corners of the mouth
            fdata['mouth'] = tup2(shape.part(49), shape.part(55))
        data.append(fdata)
    return {'dlib:Faces': data}
def analyze_barcode_zxing(self):
    """
    Use ``zxing`` to find barcodes, qr codes, data matrices, etc.
    from the image.

    Invokes zxing's ``CommandLineRunner`` through ``java`` and parses
    its textual report.

    :return: dict with the keys:

        - zxing:Barcodes - An array containing information about barcodes.
          Each barcode is encoded to a dictionary with the keys:

            - format - The format of the barcode. Example: QR_CODE,
              CODABAR, DATA_MATRIX, etc.
            - data - The text data that is encoded in the barcode.
            - bounding box - A dictionary with left, width, top, height.
            - points - The detection points of the barcode (4 points for
              QR codes and Data matrices and 2 points for barcodes).
    """
    image_array = self.fetch('ndarray')
    if all(map(lambda x: x < 4, image_array.shape)):
        # If the file is less than 4 pixels, it won't contain a barcode.
        # Small files cause zxing to crash so, we just return empty.
        return {}
    if (image_array.ndim == 4 or
            (image_array.ndim == 3 and
             image_array.shape[2] not in (3, 4))):
        logging.warn('Barcode analysis with zxing of animated images '
                     'or multi page images is not supported yet.')
        return {}
    filename = self.fetch('filename_zxing')
    if filename is None:
        return {}
    try:
        output = subprocess.check_output([
            'java', '-cp', os.path.join(DATA_PATH, '*'),
            'com.google.zxing.client.j2se.CommandLineRunner', '--multi',
            filename],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        if 'java.io.IOException: Could not load file' in err.output:
            logging.error(
                "`java.io` is unable to read this file. Possibly the file "
                "has invalid exifdata or is corrupt. This is required for "
                "zxing's barcode analysis.")
        else:
            logging.error(err.output)
        return {}
    # NOTE(review): ``check_output`` returns bytes on Python 3, which
    # would break these str containment/regex operations -- confirm the
    # supported interpreter versions.
    if 'No barcode found' in output:
        return {}
    barcodes = []
    for section in output.split("\nfile:"):
        lines = section.strip().splitlines()
        _format = re.search(r'format:\s([^,]+)', lines[0]).group(1)
        raw_result = lines[2]
        parsed_result = lines[4]
        num_pts = int(re.search(r'Found (\d+) result points.', lines[5])
                      .group(1))
        points = []
        # Matches a decimal number with an optional integer/fraction
        # part; deliberately has no inner capture group so that the
        # parenthesised uses below capture the *whole* number.
        float_re = r'\d*[.]?\d+'
        for i in range(num_pts):
            # Each result point is printed as "(x,y)".  The previous
            # pattern captured only the optional "<digits>." prefix of
            # each number, dropping fractions (12.75 -> 12.0) and
            # raising TypeError for integer-formatted points; wrapping
            # the full number fixes both.
            pt = re.search(r'\(\s*({0})\s*,\s*({0})\s*\)'.format(float_re),
                           lines[6 + i])
            point = float(pt.group(1)), float(pt.group(2))
            points.append(point)
        bbox = {}
        if num_pts == 2:  # left, right
            l, r = [(int(i), int(j)) for (i, j) in points]
            bbox = {"left": l[0], "top": l[1],
                    "width": r[0] - l[0] + 1, "height": r[1] - l[1] + 1}
        elif num_pts == 4:  # bottomLeft, topLeft, topRight, bottomRight
            lb, lt, rt, rb = [(int(i), int(j)) for (i, j) in points]
            bbox = {"left": min(lb[0], lt[0]),
                    "top": min(lt[1], rt[1]),
                    "width": max(rb[0] - lb[0], rt[0] - lt[0]),
                    "height": max(rb[1] - rt[1], lb[1] - lt[1])}
        barcodes.append({'format': _format, 'points': points,
                         'raw_data': raw_result, 'data': parsed_result,
                         'bounding box': bbox})
    return {'zxing:Barcodes': barcodes}
def analyze_barcode_zbar(self):
    """
    Use ``zbar`` to find barcodes and qr codes from the image.

    :return: dict with the keys:

        - zbar:Barcodes - An array containing information about barcodes.
          Each barcode is encoded to a dictionary with the keys:

            - format - The format of the barcode. Example: QRCODE,
              I25, etc.
            - data - The text data that is encoded in the barcode.
            - bounding box - A dictionary with left, width, top, height.
            - confidence - The quality of the barcode. The higher it is
              the more accurate the detection is.
    """
    grey = self.fetch('ndarray_grey')
    if grey.ndim == 3:
        logging.warn('Barcodes cannot be detected in animated '
                     'images using zbar.')
        return {}
    rows, cols = grey.shape
    # zbar consumes raw 8-bit greyscale ("Y800") pixel data.
    zimg = zbar.Image(cols, rows, 'Y800', grey.tobytes())
    reader = zbar.ImageScanner()
    reader.parse_config('enable')
    if reader.scan(zimg) == 0:
        return {}
    found = []
    for symbol in zimg:
        # ``location`` is a sequence of (x, y) corner points; the
        # bounding box is their axis-aligned extent.
        corners = numpy.array(symbol.location)
        x_lo, y_lo = corners[:, 0].min(), corners[:, 1].min()
        x_hi, y_hi = corners[:, 0].max(), corners[:, 1].max()
        found.append({'data': symbol.data,
                      'bounding box': {"left": x_lo, "top": y_lo,
                                       "width": x_hi - x_lo,
                                       "height": y_hi - y_lo},
                      'confidence': symbol.quality,
                      'format': str(symbol.type)})
    return {'zbar:Barcodes': found}
| AbdealiJK/file-metadata | file_metadata/image/image_file.py | Python | mit | 36,216 | [
"Gaussian"
] | 4a7b567ed0cbfdb6a7fa458d6fc91f326b549c09f43534cfc15b5094135d2c7b |
# -*- coding: utf-8 -*-
"""Implementation of execution-related magic functions."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from __future__ import absolute_import
import ast
import bdb
import gc
import itertools
import os
import sys
import time
import timeit
from pdb import Restart
# cProfile was added in Python2.5
try:
import cProfile as profile
import pstats
except ImportError:
# profile isn't bundled by default in Debian for license reasons
try:
import profile, pstats
except ImportError:
profile = pstats = None
from IPython.core import oinspect
from IPython.core import magic_arguments
from IPython.core import page
from IPython.core.error import UsageError
from IPython.core.macro import Macro
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
line_cell_magic, on_off, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import py3compat
from IPython.utils.py3compat import builtin_mod, iteritems, PY3
from IPython.utils.contexts import preserve_keys
from IPython.utils.capture import capture_output
from IPython.utils.ipstruct import Struct
from IPython.utils.module_paths import find_mod
from IPython.utils.path import get_py_filename, shellglob
from IPython.utils.timing import clock, clock2
from warnings import warn
from logging import error
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
class TimeitResult(object):
    """
    Object returned by the timeit magic with info about the run.

    Contains the following attributes :

    loops: (int) number of loops done per measurement
    repeat: (int) number of times the measurement has been repeated
    best: (float) best execution time / number
    all_runs: (list of float) execution time of each run (in s)
    compile_time: (float) time of statement compilation (s)
    """

    def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
        self.loops = loops
        self.repeat = repeat
        self.best = best
        self.worst = worst
        self.all_runs = all_runs
        self.compile_time = compile_time
        self._precision = precision

    def _repr_pretty_(self, p, cycle):
        # Singular "loop" only when a single loop was timed.
        noun = u"loop" if self.loops == 1 else u"loops"
        timing = _format_time(self.best, self._precision)
        summary = u"%d %s, best of %d: %s per loop" % (
            self.loops, noun, self.repeat, timing)
        p.text(u'<TimeitResult : ' + summary + u'>')
class TimeitTemplateFiller(ast.NodeTransformer):
    """Fill in the AST template for timing execution.

    This is quite closely tied to the template definition, which is in
    :meth:`ExecutionMagics.timeit`.
    """

    def __init__(self, ast_setup, ast_stmt):
        self.ast_setup = ast_setup
        self.ast_stmt = ast_stmt

    def visit_FunctionDef(self, node):
        "Fill in the setup statement"
        # Transform children first so the inner For loop is handled
        # before the 'setup' placeholder is replaced.
        self.generic_visit(node)
        if node.name == "inner":
            node.body[:1] = self.ast_setup.body
        return node

    def visit_For(self, node):
        "Fill in the statement to be timed"
        first_stmt = node.body[0]
        placeholder = getattr(getattr(first_stmt, 'value', None), 'id', None)
        if placeholder == 'stmt':
            node.body = self.ast_stmt.body
        return node
class Timer(timeit.Timer):
    """Timer class that explicitly uses ``self.inner``.

    ``Timer.inner`` is an undocumented implementation detail of CPython
    and is not shared by PyPy, hence this explicit override.
    """

    # Timer.timeit copied from CPython 3.4.2
    def timeit(self, number=timeit.default_number):
        """Time 'number' executions of the main statement.

        The setup statement is executed once, then the total time taken
        to run the main statement ``number`` times is returned as a
        float in seconds.  Garbage collection is switched off for the
        measurement and restored to its previous state afterwards.
        """
        reps = itertools.repeat(None, number)
        gc_was_enabled = gc.isenabled()
        gc.disable()
        try:
            elapsed = self.inner(reps, self.timer)
        finally:
            if gc_was_enabled:
                gc.enable()
        return elapsed
@magics_class
class ExecutionMagics(Magics):
"""Magics related to code execution, debugging, profiling, etc.
"""
def __init__(self, shell):
    """Set up execution magics, degrading %prun when no profiler exists."""
    super(ExecutionMagics, self).__init__(shell)
    # Default execution function used to actually run user code.
    self.default_runner = None
    if profile is None:
        # The (non-free) profile module is unavailable: %prun can only
        # print an explanatory notice.
        self.prun = self.profile_missing_notice
def profile_missing_notice(self, *args, **kwargs):
    """Stand-in for %prun when the profile module is not installed."""
    notice = """\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free."""
    error(notice)
@skip_doctest
@line_cell_magic
def prun(self, parameter_s='', cell=None):
    """Run a statement through the python code profiler.

    Usage, in line mode:
      %prun [options] statement

    Usage, in cell mode:
      %%prun [options] [statement]
      code...
      code...

    In cell mode, the additional code lines are appended to the (possibly
    empty) statement in the first line. Cell mode allows you to easily
    profile multiline blocks without having to put them in a separate
    function.

    The given statement (which doesn't require quote marks) is run via the
    python profiler in a manner similar to the profile.run() function.
    Namespaces are internally managed to work correctly; profile.run
    cannot be used in IPython because it makes certain assumptions about
    namespaces which do not hold under IPython.

    Options:

    -l <limit>
      you can place restrictions on what or how much of the
      profile gets printed. The limit value can be:

         * A string: only information for function names containing this string
           is printed.

         * An integer: only these many lines are printed.

         * A float (between 0 and 1): this fraction of the report is printed
           (for example, use a limit of 0.4 to see the topmost 40% only).

      You can combine several limits with repeated use of the option. For
      example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
      information about class constructors.

    -r
      return the pstats.Stats object generated by the profiling. This
      object has all the information about the profile in it, and you can
      later use it for further analysis or in other functions.

    -s <key>
      sort profile by given key. You can provide more than one key
      by using the option several times: '-s key1 -s key2 -s key3...'. The
      default sorting key is 'time'.

      The following is copied verbatim from the profile documentation
      referenced below:

      When more than one key is provided, additional keys are used as
      secondary criteria when the there is equality in all keys selected
      before them.

      Abbreviations can be used for any key names, as long as the
      abbreviation is unambiguous. The following are the keys currently
      defined:

      ============ =====================
      Valid Arg    Meaning
      ============ =====================
      "calls"      call count
      "cumulative" cumulative time
      "file"       file name
      "module"     file name
      "pcalls"     primitive call count
      "line"       line number
      "name"       function name
      "nfl"        name/file/line
      "stdname"    standard name
      "time"       internal time
      ============ =====================

      Note that all sorts on statistics are in descending order (placing
      most time consuming items first), where as name, file, and line number
      searches are in ascending order (i.e., alphabetical). The subtle
      distinction between "nfl" and "stdname" is that the standard name is a
      sort of the name as printed, which means that the embedded line
      numbers get compared in an odd way. For example, lines 3, 20, and 40
      would (if the file names were the same) appear in the string order
      "20" "3" and "40". In contrast, "nfl" does a numeric compare of the
      line numbers. In fact, sort_stats("nfl") is the same as
      sort_stats("name", "file", "line").

    -T <filename>
      save profile results as shown on screen to a text
      file. The profile is still shown on screen.

    -D <filename>
      save (via dump_stats) profile statistics to given
      filename. This data is in a format understood by the pstats module, and
      is generated by a call to the dump_stats() method of profile
      objects. The profile is still shown on screen.

    -q
      suppress output to the pager. Best used with -T and/or -D above.

    If you want to run complete programs under the profiler's control, use
    ``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
    contains profiler specific options as described here.

    You can read the complete documentation for the profile module with::

      In [1]: import profile; profile.help()
    """
    opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
                                       list_all=True, posix=False)
    if cell is not None:
        # Cell mode: append the cell body to the first-line statement.
        arg_str += '\n' + cell
    # Apply IPython's input transformations (magic/help syntax, etc.)
    # before handing the code to the profiler.
    arg_str = self.shell.input_splitter.transform_cell(arg_str)
    return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
def _run_with_profiler(self, code, opts, namespace):
    """
    Run `code` with profiler.  Used by ``%prun`` and ``%run -p``.

    Parameters
    ----------
    code : str
        Code to be executed.
    opts : Struct
        Options parsed by `self.parse_options`.
    namespace : dict
        A dictionary for Python namespace (e.g., `self.shell.user_ns`).

    Returns
    -------
    The ``pstats.Stats`` object when the ``-r`` option was given,
    otherwise None.
    """
    # Fill default values for unspecified options:
    opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))

    prof = profile.Profile()
    try:
        prof = prof.runctx(code, namespace, namespace)
        sys_exit = ''
    except SystemExit:
        sys_exit = """*** SystemExit exception caught in code being profiled."""

    stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)

    lims = opts.l
    if lims:
        lims = []  # rebuild lims with ints/floats/strings
        for lim in opts.l:
            try:
                lims.append(int(lim))
            except ValueError:
                try:
                    lims.append(float(lim))
                except ValueError:
                    lims.append(lim)

    # Trap output: print_stats writes to stats.stream, so temporarily
    # redirect it into a StringIO and always restore it afterwards.
    stdout_trap = StringIO()
    stats_stream = stats.stream
    try:
        stats.stream = stdout_trap
        stats.print_stats(*lims)
    finally:
        stats.stream = stats_stream

    output = stdout_trap.getvalue()
    output = output.rstrip()

    if 'q' not in opts:
        page.page(output)
    print(sys_exit, end=' ')

    dump_file = opts.D[0]
    text_file = opts.T[0]
    if dump_file:
        prof.dump_stats(dump_file)
        print('\n*** Profile stats marshalled to file',
              repr(dump_file)+'.', sys_exit)
    if text_file:
        # Use a context manager so the handle is closed even when the
        # write raises (the previous open/write/close leaked it).
        with open(text_file, 'w') as pfile:
            pfile.write(output)
        print('\n*** Profile printout saved to text file',
              repr(text_file)+'.', sys_exit)

    if 'r' in opts:
        return stats
    else:
        return None
@line_magic
def pdb(self, parameter_s=''):
    """Control the automatic calling of the pdb interactive debugger.

    Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
    argument it works as a toggle.

    When an exception is triggered, IPython can optionally call the
    interactive pdb debugger after the traceback printout. %pdb toggles
    this feature on and off.

    The initial state of this feature is set in your configuration
    file (the option is ``InteractiveShell.pdb``).

    If you want to just activate the debugger AFTER an exception has fired,
    without having to type '%pdb on' and rerunning your code, you can use
    the %debug magic."""

    arg = parameter_s.strip().lower()
    if not arg:
        # No argument: flip the current setting.
        new_pdb = not self.shell.call_pdb
    else:
        states = {'off': 0, '0': 0, 'on': 1, '1': 1}
        if arg not in states:
            print ('Incorrect argument. Use on/1, off/0, '
                   'or nothing for a toggle.')
            return
        new_pdb = states[arg]

    # set on the shell
    self.shell.call_pdb = new_pdb
    print('Automatic pdb calling has been turned', on_off(new_pdb))
@skip_doctest
@magic_arguments.magic_arguments()
@magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
    help="""
    Set break point at LINE in FILE.
    """
)
@magic_arguments.argument('statement', nargs='*',
    help="""
    Code to run in debugger.
    You can omit this in cell magic mode.
    """
)
@line_cell_magic
def debug(self, line='', cell=None):
    """Activate the interactive debugger.

    This magic command support two ways of activating debugger.
    One is to activate debugger before executing code.  This way, you
    can set a break point, to step through the code from the point.
    You can use this mode by giving statements to execute and optionally
    a breakpoint.

    The other one is to activate debugger in post-mortem mode.  You can
    activate this mode simply running %debug without any argument.
    If an exception has just occurred, this lets you inspect its stack
    frames interactively.  Note that this will always work only on the last
    traceback that occurred, so you must call this quickly after an
    exception that you wish to inspect has fired, because if another one
    occurs, it clobbers the previous one.

    If you want IPython to automatically do this on every exception, see
    the %pdb magic for more details.
    """
    args = magic_arguments.parse_argstring(self.debug, line)
    # No breakpoint, no statement and no cell body means post-mortem
    # inspection of the last exception; otherwise run the given code
    # under the debugger.
    if not (args.breakpoint or args.statement or cell):
        self._debug_post_mortem()
    else:
        code = "\n".join(args.statement)
        if cell:
            code += "\n" + cell
        self._debug_exec(code, args.breakpoint)
def _debug_post_mortem(self):
    """Start the debugger on the most recent exception (post-mortem)."""
    self.shell.debugger(force=True)
def _debug_exec(self, code, breakpoint):
    """Run ``code`` under the debugger.

    ``breakpoint`` is a ``"FILE:LINE"`` string for an initial break
    point, or a false value for none.
    """
    filename = bp_line = None
    if breakpoint:
        # rsplit tolerates colons inside the file name (e.g. "C:\...").
        filename, lineno = breakpoint.rsplit(':', 1)
        bp_line = int(lineno)
    self._run_with_debugger(code, self.shell.user_ns, filename, bp_line)
@line_magic
def tb(self, s):
    """Print the last traceback with the currently active exception mode.

    See %xmode for changing exception reporting modes.
    """
    # ``s`` (the magic's argument string) is unused.
    self.shell.showtraceback()
@skip_doctest
@line_magic
def run(self, parameter_s='', runner=None,
        file_finder=get_py_filename):
    """Run the named file inside IPython as a program.
    Usage::
      %run [-n -i -e -G]
           [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
           ( -m mod | file ) [args]
    Parameters after the filename are passed as command-line arguments to
    the program (put in sys.argv). Then, control returns to IPython's
    prompt.
    This is similar to running at a system prompt ``python file args``,
    but with the advantage of giving you IPython's tracebacks, and of
    loading all variables into your interactive namespace for further use
    (unless -p is used, see below).
    The file is executed in a namespace initially consisting only of
    ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
    sees its environment as if it were being run as a stand-alone program
    (except for sharing global objects such as previously imported
    modules). But after execution, the IPython interactive namespace gets
    updated with all variables defined in the program (except for __name__
    and sys.argv). This allows for very convenient loading of code for
    interactive work, while giving each program a 'clean sheet' to run in.
    Arguments are expanded using shell-like glob match. Patterns
    '*', '?', '[seq]' and '[!seq]' can be used. Additionally,
    tilde '~' will be expanded into user's home directory. Unlike
    real shells, quotation does not suppress expansions. Use
    *two* back slashes (e.g. ``\\\\*``) to suppress expansions.
    To completely disable these expansions, you can use -G flag.
    Options:
    -n
      __name__ is NOT set to '__main__', but to the running file's name
      without extension (as python does under import). This allows running
      scripts and reloading the definitions in them without calling code
      protected by an ``if __name__ == "__main__"`` clause.
    -i
      run the file in IPython's namespace instead of an empty one. This
      is useful if you are experimenting with code written in a text editor
      which depends on variables defined interactively.
    -e
      ignore sys.exit() calls or SystemExit exceptions in the script
      being run. This is particularly useful if IPython is being used to
      run unittests, which always exit with a sys.exit() call. In such
      cases you are interested in the output of the test results, not in
      seeing a traceback of the unittest module.
    -t
      print timing information at the end of the run. IPython will give
      you an estimated CPU time consumption for your script, which under
      Unix uses the resource module to avoid the wraparound problems of
      time.clock(). Under Unix, an estimate of time spent on system tasks
      is also given (for Windows platforms this is reported as 0.0).
    If -t is given, an additional ``-N<N>`` option can be given, where <N>
    must be an integer indicating how many times you want the script to
    run. The final timing report will include total and per run results.
    For example (testing the script uniq_stable.py)::
        In [1]: run -t uniq_stable
        IPython CPU timings (estimated):
          User  : 0.19597 s.
          System: 0.0 s.
        In [2]: run -t -N5 uniq_stable
        IPython CPU timings (estimated):
        Total runs performed: 5
          Times : Total   Per run
          User  : 0.910862 s,  0.1821724 s.
          System: 0.0 s,  0.0 s.
    -d
      run your program under the control of pdb, the Python debugger.
      This allows you to execute your program step by step, watch variables,
      etc. Internally, what IPython does is similar to calling::
          pdb.run('execfile("YOURFILENAME")')
      with a breakpoint set on line 1 of your file. You can change the line
      number for this automatic breakpoint to be <N> by using the -bN option
      (where N must be an integer). For example::
          %run -d -b40 myscript
      will set the first breakpoint at line 40 in myscript.py. Note that
      the first breakpoint must be set on a line which actually does
      something (not a comment or docstring) for it to stop execution.
      Or you can specify a breakpoint in a different file::
          %run -d -b myotherfile.py:20 myscript
      When the pdb debugger starts, you will see a (Pdb) prompt. You must
      first enter 'c' (without quotes) to start execution up to the first
      breakpoint.
      Entering 'help' gives information about the use of the debugger. You
      can easily see pdb's full documentation with "import pdb;pdb.help()"
      at a prompt.
    -p
      run program under the control of the Python profiler module (which
      prints a detailed report of execution times, function calls, etc).
      You can pass other options after -p which affect the behavior of the
      profiler itself. See the docs for %prun for details.
      In this mode, the program's variables do NOT propagate back to the
      IPython interactive namespace (because they remain in the namespace
      where the profiler executes them).
      Internally this triggers a call to %prun, see its documentation for
      details on the options available specifically for profiling.
    There is one special usage for which the text above doesn't apply:
    if the filename ends with .ipy[nb], the file is run as ipython script,
    just as if the commands were written on IPython prompt.
    -m
      specify module name to load instead of script path. Similar to
      the -m option for the python interpreter. Use this option last if you
      want to combine with other %run options. Unlike the python interpreter
      only source modules are allowed no .pyc or .pyo files.
      For example::
          %run -m example
      will run the example module.
    -G
      disable shell-like glob expansion of arguments.
    """
    # get arguments and set sys.argv for program to be run.
    opts, arg_lst = self.parse_options(parameter_s,
                                       'nidtN:b:pD:l:rs:T:em:G',
                                       mode='list', list_all=1)
    # -m: resolve the module name to its source file so the rest of the
    # machinery can treat it like an ordinary script path.
    if "m" in opts:
        modulename = opts["m"][0]
        modpath = find_mod(modulename)
        if modpath is None:
            warn('%r is not a valid modulename on sys.path'%modulename)
            return
        arg_lst = [modpath] + arg_lst
    try:
        filename = file_finder(arg_lst[0])
    except IndexError:
        warn('you must provide at least a filename.')
        print('\n%run:\n', oinspect.getdoc(self.run))
        return
    except IOError as e:
        try:
            msg = str(e)
        except UnicodeError:
            msg = e.message
        error(msg)
        return
    # .ipy/.ipynb files are replayed through the IPython machinery rather
    # than executed as plain Python source.
    if filename.lower().endswith(('.ipy', '.ipynb')):
        with preserve_keys(self.shell.user_ns, '__file__'):
            self.shell.user_ns['__file__'] = filename
            self.shell.safe_execfile_ipy(filename)
        return
    # Control the response to exit() calls made by the script being run
    exit_ignore = 'e' in opts
    # Make sure that the running script gets a proper sys.argv as if it
    # were run from a system shell.
    save_argv = sys.argv # save it for later restoring
    if 'G' in opts:
        args = arg_lst[1:]
    else:
        # tilde and glob expansion
        args = shellglob(map(os.path.expanduser, arg_lst[1:]))
    sys.argv = [filename] + args # put in the proper filename
    # protect sys.argv from potential unicode strings on Python 2:
    if not py3compat.PY3:
        sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
    if 'i' in opts:
        # Run in user's interactive namespace
        prog_ns = self.shell.user_ns
        __name__save = self.shell.user_ns['__name__']
        prog_ns['__name__'] = '__main__'
        main_mod = self.shell.user_module
        # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
        # set the __file__ global in the script's namespace
        # TK: Is this necessary in interactive mode?
        prog_ns['__file__'] = filename
    else:
        # Run in a fresh, empty namespace
        if 'n' in opts:
            name = os.path.splitext(os.path.basename(filename))[0]
        else:
            name = '__main__'
        # The shell MUST hold a reference to prog_ns so after %run
        # exits, the python deletion mechanism doesn't zero it out
        # (leaving dangling references). See interactiveshell for details
        main_mod = self.shell.new_main_mod(filename, name)
        prog_ns = main_mod.__dict__
    # pickle fix. See interactiveshell for an explanation. But we need to
    # make sure that, if we overwrite __main__, we replace it at the end
    main_mod_name = prog_ns['__name__']
    if main_mod_name == '__main__':
        restore_main = sys.modules['__main__']
    else:
        restore_main = False
    # This needs to be undone at the end to prevent holding references to
    # every single object ever created.
    sys.modules[main_mod_name] = main_mod
    # Under -p or -d the script is not called directly: a small code string
    # plus the namespace it needs are handed to the profiler/debugger driver.
    if 'p' in opts or 'd' in opts:
        if 'm' in opts:
            code = 'run_module(modulename, prog_ns)'
            code_ns = {
                'run_module': self.shell.safe_run_module,
                'prog_ns': prog_ns,
                'modulename': modulename,
                }
        else:
            if 'd' in opts:
                # allow exceptions to raise in debug mode
                code = 'execfile(filename, prog_ns, raise_exceptions=True)'
            else:
                code = 'execfile(filename, prog_ns)'
            code_ns = {
                'execfile': self.shell.safe_execfile,
                'prog_ns': prog_ns,
                'filename': get_py_filename(filename),
                }
    try:
        stats = None
        if 'p' in opts:
            stats = self._run_with_profiler(code, opts, code_ns)
        else:
            if 'd' in opts:
                bp_file, bp_line = parse_breakpoint(
                    opts.get('b', ['1'])[0], filename)
                self._run_with_debugger(
                    code, code_ns, filename, bp_line, bp_file)
            else:
                if 'm' in opts:
                    def run():
                        self.shell.safe_run_module(modulename, prog_ns)
                else:
                    if runner is None:
                        runner = self.default_runner
                    if runner is None:
                        runner = self.shell.safe_execfile
                    def run():
                        runner(filename, prog_ns, prog_ns,
                                exit_ignore=exit_ignore)
                if 't' in opts:
                    # timed execution
                    try:
                        nruns = int(opts['N'][0])
                        if nruns < 1:
                            error('Number of runs must be >=1')
                            return
                    except (KeyError):
                        nruns = 1
                    self._run_with_timing(run, nruns)
                else:
                    # regular execution
                    run()
        if 'i' in opts:
            self.shell.user_ns['__name__'] = __name__save
        else:
            # update IPython interactive namespace
            # Some forms of read errors on the file may mean the
            # __name__ key was never set; using pop we don't have to
            # worry about a possible KeyError.
            prog_ns.pop('__name__', None)
            with preserve_keys(self.shell.user_ns, '__file__'):
                self.shell.user_ns.update(prog_ns)
    finally:
        # It's a bit of a mystery why, but __builtins__ can change from
        # being a module to becoming a dict missing some key data after
        # %run. As best I can see, this is NOT something IPython is doing
        # at all, and similar problems have been reported before:
        # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
        # Since this seems to be done by the interpreter itself, the best
        # we can do is to at least restore __builtins__ for the user on
        # exit.
        self.shell.user_ns['__builtins__'] = builtin_mod
        # Ensure key global structures are restored
        sys.argv = save_argv
        if restore_main:
            sys.modules['__main__'] = restore_main
        else:
            # Remove from sys.modules the reference to main_mod we'd
            # added. Otherwise it will trap references to objects
            # contained therein.
            del sys.modules[main_mod_name]
    return stats
def _run_with_debugger(self, code, code_ns, filename=None,
                       bp_line=None, bp_file=None):
    """
    Run `code` in debugger with a break point.
    Parameters
    ----------
    code : str
        Code to execute.
    code_ns : dict
        A namespace in which `code` is executed.
    filename : str
        `code` is ran as if it is in `filename`.
    bp_line : int, optional
        Line number of the break point.
    bp_file : str, optional
        Path to the file in which break point is specified.
        `filename` is used if not given.
    Raises
    ------
    UsageError
        If the break point given by `bp_line` is not valid.
    """
    # Lazily create the Pdb instance shared with the traceback handler.
    deb = self.shell.InteractiveTB.pdb
    if not deb:
        self.shell.InteractiveTB.pdb = self.shell.InteractiveTB.debugger_cls()
        deb = self.shell.InteractiveTB.pdb
    # deb.checkline() fails if deb.curframe exists but is None; it can
    # handle it not existing. https://github.com/ipython/ipython/issues/10028
    if hasattr(deb, 'curframe'):
        del deb.curframe
    # reset Breakpoint state, which is moronically kept
    # in a class
    bdb.Breakpoint.next = 1
    bdb.Breakpoint.bplist = {}
    bdb.Breakpoint.bpbynumber = [None]
    deb.clear_all_breaks()
    if bp_line is not None:
        # Set an initial breakpoint to stop execution
        maxtries = 10
        bp_file = bp_file or filename
        checkline = deb.checkline(bp_file, bp_line)
        if not checkline:
            # Scan forward for the next line pdb considers breakable.
            for bp in range(bp_line + 1, bp_line + maxtries + 1):
                if deb.checkline(bp_file, bp):
                    break
            else:
                msg = ("\nI failed to find a valid line to set "
                       "a breakpoint\n"
                       "after trying up to line: %s.\n"
                       "Please set a valid breakpoint manually "
                       "with the -b option." % bp)
                raise UsageError(msg)
        # if we find a good linenumber, set the breakpoint
        # NOTE(review): the breakpoint is still set at bp_line, not at the
        # first valid line `bp` found by the scan above - TODO confirm.
        deb.do_break('%s:%s' % (bp_file, bp_line))
    if filename:
        # Mimic Pdb._runscript(...)
        deb._wait_for_mainpyfile = True
        deb.mainpyfile = deb.canonic(filename)
    # Start file run
    print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
    try:
        if filename:
            # save filename so it can be used by methods on the deb object
            deb._exec_filename = filename
        while True:
            try:
                deb.run(code, code_ns)
            except Restart:
                print("Restarting")
                if filename:
                    deb._wait_for_mainpyfile = True
                    deb.mainpyfile = deb.canonic(filename)
                continue
            else:
                break
    except:
        etype, value, tb = sys.exc_info()
        # Skip three frames in the traceback: the %run one,
        # one inside bdb.py, and the command-line typed by the
        # user (run by exec in pdb itself).
        self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
@staticmethod
def _run_with_timing(run, nruns):
    """
    Run function `run` `nruns` times and print CPU/wall timing information.

    Parameters
    ----------
    run : callable
        Any callable object which takes no argument.
    nruns : int
        Number of times to execute `run`.
    """
    twall0 = time.time()
    t0 = clock2()
    for _ in range(nruns):
        run()
    t1 = clock2()
    # clock2() returns a (user, system) pair.
    t_usr = t1[0] - t0[0]
    t_sys = t1[1] - t0[1]
    print("\nIPython CPU timings (estimated):")
    if nruns == 1:
        print(" User : %10.2f s." % t_usr)
        print(" System : %10.2f s." % t_sys)
    else:
        print("Total runs performed:", nruns)
        print(" Times : %10s %10s" % ('Total', 'Per run'))
        print(" User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
        print(" System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
        twall1 = time.time()
        print("Wall time: %10.2f s." % (twall1 - twall0))
@skip_doctest
@line_cell_magic
def timeit(self, line='', cell=None):
    """Time execution of a Python statement or expression
    Usage, in line mode:
      %timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
    or in cell mode:
      %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
      code
      code...
    Time execution of a Python statement or expression using the timeit
    module. This function can be used both as a line and cell magic:
    - In line mode you can time a single-line statement (though multiple
      ones can be chained with using semicolons).
    - In cell mode, the statement in the first line is used as setup code
      (executed but not timed) and the body of the cell is timed. The cell
      body has access to any variables created in the setup code.
    Options:
    -n<N>: execute the given statement <N> times in a loop. If this value
    is not given, a fitting value is chosen.
    -r<R>: repeat the loop iteration <R> times and take the best result.
    Default: 3
    -t: use time.time to measure the time, which is the default on Unix.
    This function measures wall time.
    -c: use time.clock to measure the time, which is the default on
    Windows and measures wall time. On Unix, resource.getrusage is used
    instead and returns the CPU user time.
    -p<P>: use a precision of <P> digits to display the timing result.
    Default: 3
    -q: Quiet, do not print result.
    -o: return a TimeitResult that can be stored in a variable to inspect
        the result in more details.
    Examples
    --------
    ::
      In [1]: %timeit pass
      10000000 loops, best of 3: 53.3 ns per loop
      In [2]: u = None
      In [3]: %timeit u is None
      10000000 loops, best of 3: 184 ns per loop
      In [4]: %timeit -r 4 u == None
      1000000 loops, best of 4: 242 ns per loop
      In [5]: import time
      In [6]: %timeit -n1 time.sleep(2)
      1 loop, best of 3: 2 s per loop
    The times reported by %timeit will be slightly higher than those
    reported by the timeit.py script when variables are accessed. This is
    due to the fact that %timeit executes the statement in the namespace
    of the shell, compared with timeit.py, which uses a single setup
    statement to import function or create variables. Generally, the bias
    does not matter as long as results from timeit.py are not mixed with
    those from %timeit."""
    opts, stmt = self.parse_options(line,'n:r:tcp:qo',
                                    posix=False, strict=False)
    if stmt == "" and cell is None:
        return
    timefunc = timeit.default_timer
    number = int(getattr(opts, "n", 0))
    repeat = int(getattr(opts, "r", timeit.default_repeat))
    precision = int(getattr(opts, "p", 3))
    quiet = 'q' in opts
    return_result = 'o' in opts
    if hasattr(opts, "t"):
        timefunc = time.time
    if hasattr(opts, "c"):
        timefunc = clock
    timer = Timer(timer=timefunc)
    # this code has tight coupling to the inner workings of timeit.Timer,
    # but is there a better way to achieve that the code stmt has access
    # to the shell namespace?
    transform = self.shell.input_splitter.transform_cell
    if cell is None:
        # called as line magic
        ast_setup = self.shell.compile.ast_parse("pass")
        ast_stmt = self.shell.compile.ast_parse(transform(stmt))
    else:
        # cell magic: first line is setup, cell body is the timed statement
        ast_setup = self.shell.compile.ast_parse(transform(stmt))
        ast_stmt = self.shell.compile.ast_parse(transform(cell))
    ast_setup = self.shell.transform_ast(ast_setup)
    ast_stmt = self.shell.transform_ast(ast_stmt)
    # Check that these compile to valid Python code *outside* the timer func
    # Invalid code may become valid when put inside the function & loop,
    # which messes up error messages.
    # https://github.com/ipython/ipython/issues/10636
    self.shell.compile(ast_setup, "<magic-timeit-setup>", "exec")
    self.shell.compile(ast_stmt, "<magic-timeit-stmt>", "exec")
    # This codestring is taken from timeit.template - we fill it in as an
    # AST, so that we can apply our AST transformations to the user code
    # without affecting the timing code.
    timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
                                    '    setup\n'
                                    '    _t0 = _timer()\n'
                                    '    for _i in _it:\n'
                                    '        stmt\n'
                                    '    _t1 = _timer()\n'
                                    '    return _t1 - _t0\n')
    timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
    timeit_ast = ast.fix_missing_locations(timeit_ast)
    # Track compilation time so it can be reported if too long
    # Minimum time above which compilation time will be reported
    tc_min = 0.1
    t0 = clock()
    code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
    tc = clock()-t0
    ns = {}
    # Execute the generated function definition so `inner` becomes the
    # timed callable, with the user namespace as its globals.
    exec(code, self.shell.user_ns, ns)
    timer.inner = ns["inner"]
    # This is used to check if there is a huge difference between the
    # best and worst timings.
    # Issue: https://github.com/ipython/ipython/issues/6471
    worst_tuning = 0
    if number == 0:
        # determine number so that 0.2 <= total time < 2.0
        number = 1
        for _ in range(1, 10):
            time_number = timer.timeit(number)
            worst_tuning = max(worst_tuning, time_number / number)
            if time_number >= 0.2:
                break
            number *= 10
    all_runs = timer.repeat(repeat, number)
    best = min(all_runs) / number
    worst = max(all_runs) / number
    if worst_tuning:
        worst = max(worst, worst_tuning)
    if not quiet :
        # Check best timing is greater than zero to avoid a
        # ZeroDivisionError.
        # In cases where the slowest timing is lesser than a microsecond
        # we assume that it does not really matter if the fastest
        # timing is 4 times faster than the slowest timing or not.
        if worst > 4 * best and best > 0 and worst > 1e-6:
            print("The slowest run took %0.2f times longer than the "
                  "fastest. This could mean that an intermediate result "
                  "is being cached." % (worst / best))
        if number == 1: # No s at "loops" if only one loop
            print(u"%d loop, best of %d: %s per loop" % (number, repeat,
                                                         _format_time(best, precision)))
        else:
            print(u"%d loops, best of %d: %s per loop" % (number, repeat,
                                                          _format_time(best, precision)))
    if tc > tc_min:
        print("Compiler time: %.2f s" % tc)
    if return_result:
        return TimeitResult(number, repeat, best, worst, all_runs, tc, precision)
@skip_doctest
@needs_local_scope
@line_cell_magic
def time(self,line='', cell=None, local_ns=None):
    """Time execution of a Python statement or expression.
    The CPU and wall clock times are printed, and the value of the
    expression (if any) is returned. Note that under Win32, system time
    is always reported as 0, since it can not be measured.
    This function can be used both as a line and cell magic:
    - In line mode you can time a single-line statement (though multiple
      ones can be chained with using semicolons).
    - In cell mode, you can time the cell body (a directly
      following statement raises an error).
    This function provides very basic timing functionality. Use the timeit
    magic for more control over the measurement.
    Examples
    --------
    ::
      In [1]: %time 2**128
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00
      Out[1]: 340282366920938463463374607431768211456L
      In [2]: n = 1000000
      In [3]: %time sum(range(n))
      CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
      Wall time: 1.37
      Out[3]: 499999500000L
      In [4]: %time print 'hello world'
      hello world
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00
    Note that the time needed by Python to compile the given expression
    will be reported if it is more than 0.1s. In this example, the
    actual exponentiation is done by Python at compilation time, so while
    the expression can take a noticeable amount of time to compute, that
    time is purely due to the compilation:
      In [5]: %time 3**9999;
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00 s
      In [6]: %time 3**999999;
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00 s
      Compiler : 0.78 s
    """
    # fail immediately if the given expression can't be compiled
    if line and cell:
        raise UsageError("Can't use statement directly after '%%time'!")
    if cell:
        expr = self.shell.input_transformer_manager.transform_cell(cell)
    else:
        expr = self.shell.input_transformer_manager.transform_cell(line)
    # Minimum time above which parse time will be reported
    tp_min = 0.1
    t0 = clock()
    expr_ast = self.shell.compile.ast_parse(expr)
    tp = clock()-t0
    # Apply AST transformations
    expr_ast = self.shell.transform_ast(expr_ast)
    # Minimum time above which compilation time will be reported
    tc_min = 0.1
    # A single bare expression is compiled in 'eval' mode so its value can
    # be returned; anything else must be exec'd and returns None.
    if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
        mode = 'eval'
        source = '<timed eval>'
        expr_ast = ast.Expression(expr_ast.body[0].value)
    else:
        mode = 'exec'
        source = '<timed exec>'
    t0 = clock()
    code = self.shell.compile(expr_ast, source, mode)
    tc = clock()-t0
    # skew measurement as little as possible
    glob = self.shell.user_ns
    wtime = time.time
    # time execution
    wall_st = wtime()
    if mode=='eval':
        st = clock2()
        out = eval(code, glob, local_ns)
        end = clock2()
    else:
        st = clock2()
        exec(code, glob, local_ns)
        end = clock2()
        out = None
    wall_end = wtime()
    # Compute actual times and report
    wall_time = wall_end-wall_st
    cpu_user = end[0]-st[0]
    cpu_sys = end[1]-st[1]
    cpu_tot = cpu_user+cpu_sys
    # On windows cpu_sys is always zero, so no new information to the next print
    if sys.platform != 'win32':
        print("CPU times: user %s, sys: %s, total: %s" % \
            (_format_time(cpu_user),_format_time(cpu_sys),_format_time(cpu_tot)))
    print("Wall time: %s" % _format_time(wall_time))
    if tc > tc_min:
        print("Compiler : %s" % _format_time(tc))
    if tp > tp_min:
        print("Parser : %s" % _format_time(tp))
    return out
@skip_doctest
@line_magic
def macro(self, parameter_s=''):
    """Define a macro for future re-execution. It accepts ranges of history,
    filenames or string objects.
    Usage:\\
      %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
    Options:
    -r: use 'raw' input. By default, the 'processed' history is used,
    so that magics are loaded in their transformed version to valid
    Python. If this option is given, the raw input as typed at the
    command line is used instead.
    -q: quiet macro definition. By default, a tag line is printed
    to indicate the macro has been created, and then the contents of
    the macro are printed. If this option is given, then no printout
    is produced once the macro is created.
    This will define a global variable called `name` which is a string
    made of joining the slices and lines you specify (n1,n2,... numbers
    above) from your input history into a single string. This variable
    acts like an automatic function which re-executes those lines as if
    you had typed them. You just type 'name' at the prompt and the code
    executes.
    The syntax for indicating input ranges is described in %history.
    Note: as a 'hidden' feature, you can also use traditional python slice
    notation, where N:M means numbers N through M-1.
    For example, if your history contains (print using %hist -n )::
      44: x=1
      45: y=3
      46: z=x+y
      47: print x
      48: a=5
      49: print 'x',x,'y',y
    you can create a macro with lines 44 through 47 (included) and line 49
    called my_macro with::
      In [55]: %macro my_macro 44-47 49
    Now, typing `my_macro` (without quotes) will re-execute all this code
    in one pass.
    You don't need to give the line-numbers in order, and any given line
    number can appear multiple times. You can assemble macros with any
    lines from your input history in any order.
    The macro is a simple object which holds its value in an attribute,
    but IPython's display system checks for macros and executes them as
    code instead of printing them when you type their name.
    You can view a macro's contents by explicitly printing it with::
      print macro_name
    """
    opts,args = self.parse_options(parameter_s,'rq',mode='list')
    if not args: # List existing macros
        return sorted(k for k,v in iteritems(self.shell.user_ns) if\
                      isinstance(v, Macro))
    # A name alone, with no history ranges, is not enough to define a macro.
    if len(args) == 1:
        raise UsageError(
            "%macro insufficient args; usage '%macro name n1-n2 n3-4...")
    name, codefrom = args[0], " ".join(args[1:])
    #print 'rng',ranges # dbg
    try:
        # Resolve the history ranges/filenames into the macro's source text.
        lines = self.shell.find_user_code(codefrom, 'r' in opts)
    except (ValueError, TypeError) as e:
        print(e.args[0])
        return
    macro = Macro(lines)
    self.shell.define_macro(name, macro)
    if not ( 'q' in opts) :
        print('Macro `%s` created. To execute, type its name (without quotes).' % name)
        print('=== Macro contents: ===')
        print(macro, end=' ')
@magic_arguments.magic_arguments()
@magic_arguments.argument('output', type=str, default='', nargs='?',
    help="""The name of the variable in which to store output.
    This is a utils.io.CapturedIO object with stdout/err attributes
    for the text of the captured output.
    CapturedOutput also has a show() method for displaying the output,
    and __call__ as well, so you can use that to quickly display the
    output.
    If unspecified, captured output is discarded.
    """
)
@magic_arguments.argument('--no-stderr', action="store_true",
    help="""Don't capture stderr."""
)
@magic_arguments.argument('--no-stdout', action="store_true",
    help="""Don't capture stdout."""
)
@magic_arguments.argument('--no-display', action="store_true",
    help="""Don't capture IPython's rich display."""
)
@cell_magic
def capture(self, line, cell):
    """run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
    args = magic_arguments.parse_argstring(self.capture, line)
    # Each --no-* flag disables the corresponding capture channel.
    with capture_output(not args.no_stdout,
                        not args.no_stderr,
                        not args.no_display) as captured:
        self.shell.run_cell(cell)
    if args.output:
        # Stash the CapturedIO object under the requested variable name.
        self.shell.user_ns[args.output] = captured
def parse_breakpoint(text, current_file):
    '''Returns (file, line) for file:line and (current_file, line) for line

    The split is on the *last* colon so that file paths which themselves
    contain a colon (e.g. Windows drive letters, "C:\\script.py:20") are
    parsed correctly. This also matches the rsplit(':', 1) used by
    _debug_exec for the same FILE:LINE syntax.
    '''
    if ':' not in text:
        # Bare line number: the breakpoint goes in the current file.
        return current_file, int(text)
    filename, line = text.rsplit(':', 1)
    return filename, int(line)
def _format_time(timespan, precision=3):
"""Formats the timespan in a human readable form"""
import math
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append(u'%s%s' % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
# E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
units = [u"s", u"ms",u'us',"ns"] # the save value
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb5'.encode(sys.stdout.encoding)
units = [u"s", u"ms",u'\xb5s',"ns"]
except:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
| pacoqueen/ginn | extra/install/ipython2/ipython-5.10.0/IPython/core/magics/execution.py | Python | gpl-2.0 | 53,186 | [
"VisIt"
] | ac0eeabd7085e0f866279e47b1cb0bc8dc042e63a9a9e95b2a42f79860d82dff |
#-------------------------------------------------------------------------------
# Name: Point_Circle_Generator
#
# Author: Mark Tingey
#
# Created: 23/01/2019
#-------------------------------------------------------------------------------
##import Statements
import random as rnd
import numpy as np
import math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import csv
import os
import pandas as pd
##Define center point class
class Point:
    """A 2-D point used as a circle centre."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        # Renders as the tuple string, e.g. '(1, 2)'.
        return str((self.x, self.y))

    def reset(self):
        # NOTE(review): __init__ requires x and y, so reset() on a real
        # instance raises TypeError; the script only calls Point.reset(None),
        # which is effectively a no-op - TODO confirm intent.
        self.__init__()
##define circle class
class Circle:
    """A circle defined by a centre point and a radius."""

    def __init__(self, origin, radius):
        self.origin = origin
        self.radius = radius

    def reset(self):
        # NOTE(review): __init__ requires origin and radius; the script only
        # calls Circle.reset(None), which is effectively a no-op.
        self.__init__()
##define x coordinate for circle center
def x_coordinate(radius, angle):
    """Return the x offset of a point at `angle` degrees on a circle of `radius`."""
    return radius * math.sin(math.radians(angle))
##define y coordinate for circle center
def y_coordinate(radius, angle):
    """Return the y offset of a point at `angle` degrees on a circle of `radius`."""
    return radius * math.cos(math.radians(angle))
##Update list of angles
def get_angle(angle_count):
list_of_angles = [0,45,90,135,180,225,270,315]
choose = np.random.choice(list_of_angles)
rad = choose + angle_count
return(rad)
##Global Variables
# NOTE(review): this assigns an attribute named `seed` on the random module;
# it does NOT seed the RNG. random.seed(1234) was presumably intended - TODO confirm.
rnd.seed = 1234
np.random.seed(5)
print_angles = [0,45,90,135,180,225,270,315]  # angles echoed to the console each iteration
list_of_x = []
list_of_y = []
list_of_z = []
radius_of_pore = 35   # radius of the ring on which circle centres sit
t = 0                 # per-file point counter
points = 1000         # points generated per output file
iteration = 0
csv_file = 0          # index used in the output file names
angle_count = 0       # cumulative 1-degree rotation added to the base angles
##Create folder to hold data
path = os.getcwd()
data_folder = os.path.join(path, "Simulated_Dataset")
if os.path.isdir(data_folder)==False:
    os.mkdir("Simulated_Dataset")
os.chdir(data_folder)
##iteration for each angle position (one CSV per degree, 45 degrees total)
while iteration != 45:
    ##perform simulation
    while t != points:
        ##define center point of the area
        angle = get_angle(angle_count)
        origin_x = x_coordinate(radius_of_pore, angle)
        origin_y = y_coordinate(radius_of_pore, angle)
        ##define circle
        origin = Point(origin_x,origin_y)
        radius = 13.394
        circle = Circle(origin, radius)
        ##generate point in circle (sqrt of a uniform draw gives a uniform
        ##distribution over the disc area)
        p = rnd.random() * 2 * math.pi
        r = circle.radius * math.sqrt(rnd.random())
        x = math.cos(p) * r
        y = math.sin(p) * r
        x = origin_x + x
        y = origin_y + y
        z = 0
        ##apply gaussian error to each point (normal noise, std dev 10)
        error_x = np.random.normal(x, 10)
        error_y = np.random.normal(y, 10)
        error_z = np.random.normal(z, 10)
        ##write coordinates onto list
        list_of_x.append(error_x)
        list_of_y.append(error_y)
        list_of_z.append(error_z)
        ##counter
        t = t+1
    ##Write the coordinates to csv
    angle_count = angle_count + 1
    output = zip(list_of_x, list_of_y)
    outputname = ("simulation" + str(csv_file)+".csv")
    ##write file to csv (z values are generated but not written)
    with open(outputname, 'w', newline='') as writefile:
        writer = csv.writer(writefile)
        writer.writerows(zip(list_of_x, list_of_y))
        writefile.close()
    print(str(print_angles))
    ##reset class
    # NOTE(review): Point.reset(None)/Circle.reset(None) invoke __init__ on
    # None and reset nothing; these calls are effectively no-ops - TODO confirm.
    Point.reset(None)
    Circle.reset(None)
    ##change angle by 1 degree
    csv_file = csv_file + 1
    print_angles = [x+1 for x in print_angles]
    ##uncomment the below section to see a preview graph of generated points
    ## plt.scatter(list_of_x,list_of_y, s = 1)
    ## plt.show()
    iteration = iteration + 1
    t = 0
    list_of_x = []
    list_of_y = []
    list_of_z = []
print(str(print_angles))
| andrewruba/YangLab | JPC simulations 2019/Figure 9 - fold symmetry and rotation/point_circle_generator.py | Python | gpl-3.0 | 3,749 | [
"Gaussian"
] | eb27f32887bec0900b0d5b5510b8a026ef2ad3e16335491c12740c51c0825894 |
import os
import glob
import warnings
import logging
import re
import atexit
logger = logging.getLogger(__name__)
@atexit.register
def cleanup():
    """Stop every tacho motor and switch all LEDs off at interpreter exit.

    Writes directly to the ev3dev sysfs attribute files.
    """
    for path in glob.glob('/sys/class/tacho-motor/motor*/command'):
        with open(path, 'w') as attr:
            attr.write('stop')
    for path in glob.glob('/sys/class/leds/*/trigger'):
        with open(path, 'w') as attr:
            attr.write('none')
    for path in glob.glob('/sys/class/leds/*/brightness'):
        with open(path, 'w') as attr:
            attr.write('0')
class NoSuchSensorError(Exception):
    """Raised when no sensor matches the requested port/name."""

    def __init__(self, port, name=None):
        self.port = port
        self.name = name

    def __str__(self):
        return f"No such sensor port={self.port:d} name={self.name}"
class NoSuchMotorError(Exception):
    """Raised when no motor matches the requested port/type."""

    def __init__(self, port, _type):
        self.port = port
        self._type = _type

    def __str__(self):
        return f"No such motor port={self.port} type={self._type}"
class NoSuchLibraryError(Exception):
    """Raised when an optional runtime dependency is not installed."""

    def __init__(self, lib=""):
        self.lib = lib

    def __str__(self):
        return "No such library %s" % self.lib
class Ev3StringType(object):
    """Identity converter: sysfs string values pass through unchanged."""

    @staticmethod
    def post_read(raw):
        return raw

    @staticmethod
    def pre_write(raw):
        return raw
class Ev3IntType(object):
    """Converts sysfs values to int on read and back to str on write."""

    @staticmethod
    def post_read(raw):
        return int(raw)

    @staticmethod
    def pre_write(number):
        return str(number)
class Ev3BoolType(object):
    """Converts sysfs boolean attributes ("0"/"1" strings) to bool."""

    @staticmethod
    def post_read(value):
        # Fix: the original returned bool(value), but bool("0") is True
        # since any non-empty string is truthy.  Sysfs boolean attributes
        # report the digits "0"/"1", so convert through int first.
        return bool(int(value))

    @staticmethod
    def pre_write(value):
        return '1' if value else '0'
class Ev3OnOffType(object):
    """Maps sysfs 'on'/'off' attribute strings to Python booleans."""

    @staticmethod
    def post_read(value):
        # Only the exact string 'on' counts as True.
        return value == 'on'

    @staticmethod
    def pre_write(value):
        # Literal 'on'/'off' pass through; anything else is coerced
        # by truthiness.
        if value in ('on', 'off'):
            return value
        return 'on' if bool(value) else 'off'
class create_ev3_property(object):
    """Class decorator factory for sysfs-backed properties.

    Usage: @create_ev3_property(attr={'read_only': True, ...}, ...).
    For every keyword, a same-named property is attached to the class
    that reads/writes the sysfs attribute file of that name through
    Ev3Dev.read_value/write_value, converting values with the given
    property_type (default Ev3StringType).
    """

    def __init__(self, **kwargs):
        # Map of property name -> option dict, consumed in __call__.
        self.kwargs = kwargs

    def __call__(self, cls):
        for name, args in self.kwargs.items():
            # ev3_property is invoked immediately each iteration, so
            # `name` is captured per-property (no late-binding bug).
            def ev3_property(name, read_only=False, write_only=False, property_type=Ev3StringType):
                def fget(self):
                    # write-only attributes read as None instead of raising.
                    if not write_only:
                        return property_type.post_read(self.read_value(name))
                    else:
                        return None

                def fset(self, value):
                    self.write_value(
                        name, property_type.pre_write(value))

                # read_only drops the setter entirely.
                return property(fget, None if read_only else fset)
            setattr(cls, name, ev3_property(name, **args))
        return cls
def get_battery_percentage():
    """
    Return an int() of the percentage of battery life remaining.

    Parses the legoev3-battery uevent file for the design min/max and
    current voltages, then linearly interpolates between them.  Returns
    0 (and logs an error) if any of the three values is missing.
    """
    voltage_max = None
    voltage_min = None
    voltage_now = None

    with open('/sys/devices/platform/legoev3-battery/power_supply/legoev3-battery/uevent', 'r') as fh:
        for line in fh:
            if voltage_max is None:
                m = re.search(r'POWER_SUPPLY_VOLTAGE_MAX_DESIGN=(\d+)', line)
                if m:
                    voltage_max = int(m.group(1))
                    continue

            if voltage_min is None:
                m = re.search(r'POWER_SUPPLY_VOLTAGE_MIN_DESIGN=(\d+)', line)
                if m:
                    voltage_min = int(m.group(1))
                    continue

            if voltage_now is None:
                m = re.search(r'POWER_SUPPLY_VOLTAGE_NOW=(\d+)', line)
                if m:
                    voltage_now = int(m.group(1))

            # Fix: stop once all three *values* are collected.  The
            # original tested the three match objects here, which could
            # be unbound or stale depending on line order in uevent.
            if voltage_max is not None and voltage_min is not None and voltage_now is not None:
                break

    if voltage_max and voltage_min and voltage_now:
        # Fully charged rechargeable packs can report >= design maximum.
        if voltage_now >= voltage_max:
            return 100

        # Haven't seen this scenario but it can't hurt to check for it.
        elif voltage_now <= voltage_min:
            return 0

        # voltage_now is between the min and max: interpolate.
        else:
            voltage_max -= voltage_min
            voltage_now -= voltage_min
            return int(voltage_now / float(voltage_max) * 100)
    else:
        logger.error('voltage_max %s, voltage_min %s, voltage_now %s' %
                     (voltage_max, voltage_min, voltage_now))
        return 0
class Ev3Dev(object):
    """Base class for ev3dev sysfs-backed devices.

    Subclasses assign ``sys_path`` to the device's sysfs directory;
    attribute files beneath it are accessed via read_value/write_value.
    """

    def __init__(self):
        self.sys_path = ""

    def read_value(self, name):
        """Return the stripped contents of attribute file *name*, or
        None if that file does not exist."""
        attr_path = os.path.join(self.sys_path, name)
        if not os.path.isfile(attr_path):
            return None
        with open(attr_path) as handle:
            return handle.read().strip()

    def write_value(self, name, value):
        """Write str(*value*) to attribute file *name*; a missing file
        is silently ignored."""
        attr_path = os.path.join(self.sys_path, name)
        if not os.path.isfile(attr_path):
            return
        with open(attr_path, 'w') as handle:
            handle.write(str(value))
# Expose the standard lego-sensor sysfs attributes as read-only
# properties; value0..value7 are converted to int.
@create_ev3_property(
    bin_data={'read_only': True},
    bin_data_format={'read_only': True},
    decimals={'read_only': True},
    #mode={ 'read_only': False},
    fw_version={'read_only': True},
    modes={'read_only': True},
    name={'read_only': True},
    port_name={'read_only': True},
    uevent={'read_only': True},
    units={'read_only': True},
    value0={'read_only': True, 'property_type': Ev3IntType},
    value1={'read_only': True, 'property_type': Ev3IntType},
    value2={'read_only': True, 'property_type': Ev3IntType},
    value3={'read_only': True, 'property_type': Ev3IntType},
    value4={'read_only': True, 'property_type': Ev3IntType},
    value5={'read_only': True, 'property_type': Ev3IntType},
    value6={'read_only': True, 'property_type': Ev3IntType},
    value7={'read_only': True, 'property_type': Ev3IntType}
)
class LegoSensor(Ev3Dev):
    """A sensor under /sys/class/lego-sensor.

    The device is located either by input port number (port=1..4,
    matched against the sysfs port_name 'inN') or, with port=-1, by
    substring match on the driver name.  Raises NoSuchSensorError when
    nothing matches.  ``mode`` is cached locally and only written to
    sysfs when it actually changes.
    """

    def __init__(self, port=-1, name=None):
        Ev3Dev.__init__(self)
        sensor_existing = False
        if (port > 0):
            self.port = port
            # Scan every registered sensor for a matching port name.
            for p in glob.glob('/sys/class/lego-sensor/sensor*/port_name'):
                with open(p) as f:
                    value = f.read().strip()
                # Compare the 'inN' prefix only (port_name may carry a
                # ':mux' style suffix).
                port_len = len(str(port))
                if (value[:port_len + 2] == 'in' + str(port)):
                    self.sys_path = os.path.dirname(p)
                    sensor_existing = True
                    break
        # Fallback: locate by driver name substring when no port given.
        if (len(glob.glob('/sys/class/lego-sensor/sensor*/driver_name')) > 0 and name != None and port == -1):
            for p in glob.glob('/sys/class/lego-sensor/sensor*/driver_name'):
                with open(p) as f:
                    value = f.read().strip()
                if (name in value):
                    self.sys_path = os.path.dirname(p)
                    # Recover the numeric port from 'inN[:...]'.
                    self.port = int(self.port_name.split(':')[0][2:])
                    sensor_existing = True
                    break
        if (not sensor_existing):
            raise NoSuchSensorError(port, name)
        # Seed the local mode cache from the device.
        self._mode = self.read_value('mode')

    @property
    def mode(self):
        # Cached value; may drift if something else writes the sysfs file.
        return self._mode

    @mode.setter
    def mode(self, value):
        # Skip the sysfs write when the mode is unchanged.
        if (self._mode != value):
            self._mode = value
            self.write_value('mode', value)

    def mode_force_flush(self, value):
        """Write *value* to sysfs unconditionally, bypassing the cache
        comparison (useful if the device was changed externally)."""
        self._mode = value
        self.write_value('mode', value)
class Enum(object):
    """Minimal attribute enum.

    Enum('A', 'B') exposes .A == 'A'; keyword arguments map an
    attribute name to an arbitrary value.  Unknown names raise
    NameError.
    """

    def __init__(self, *args, **kwargs):
        mapping = dict(kwargs)
        mapping.update((arg, arg) for arg in args)
        self.enum_dict = mapping

    def __getattr__(self, name):
        try:
            return self.enum_dict[name]
        except KeyError:
            raise NameError("no such item %s" % name)
# Expose the tacho-motor sysfs attributes.  Note the `command`
# attribute is declared both read_only and write_only, which in
# create_ev3_property yields a property with no setter that reads as
# None; the methods below therefore drive it via write_value directly.
@create_ev3_property(
    commands={'read_only': True},
    command={'read_only': True, 'write_only': True},
    count_per_rot={'read_only': True, 'property_type': Ev3IntType},
    driver_name={'read_only': True},
    duty_cycle={'read_only': True, 'property_type': Ev3IntType},
    duty_cycle_sp={'read_only': False, 'property_type': Ev3IntType},
    encoder_polarity={'read_only': False},
    polarity_mode={'read_only': False},
    port_name={'read_only': True},
    position={'read_only': False, 'property_type': Ev3IntType},
    position_sp={'read_only': False, 'property_type': Ev3IntType},
    ramp_down_sp={'read_only': False, 'property_type': Ev3IntType},
    ramp_up_sp={'read_only': False, 'property_type': Ev3IntType},
    speed={'read_only': True, 'property_type': Ev3IntType},
    speed_regulation={'read_only': False, 'property_type': Ev3OnOffType},
    speed_sp={'read_only': False, 'property_type': Ev3IntType},
    state={'read_only': True},
    stop_command={'read_only': False},
    stop_commands={'read_only': True},
    time_sp={'read_only': False, 'property_type': Ev3IntType},
    uevent={'read_only': True}
)
class Motor(Ev3Dev):
    """A tacho motor under /sys/class/tacho-motor.

    Located either by output port letter ('A'..'D', matched against
    'outX' in port_name) or by driver-name string.  Raises
    NoSuchMotorError when nothing matches.  ``self.mode`` holds the
    run command ('run-forever', 'run-timed', ...) that start() sends.
    """

    STOP_MODE = Enum(COAST='coast', BRAKE='brake', HOLD='hold')
    POSITION_MODE = Enum(RELATIVE='relative', ABSOLUTE='absolute')
    PORT = Enum('A', 'B', 'C', 'D')

    def __init__(self, port='', _type=''):
        Ev3Dev.__init__(self)
        motor_existing = False
        searchpath = '/sys/class/tacho-motor/motor*/'
        if (port != ''):
            self.port = port
            # Match the requested output port letter case-insensitively.
            for p in glob.glob(searchpath + 'port_name'):
                with open(p) as f:
                    value = f.read().strip()
                if (value.lower() == ('out' + port).lower()):
                    self.sys_path = os.path.dirname(p)
                    motor_existing = True
                    break
        # Fallback: locate by exact driver-name match when no port given.
        if (_type != '' and port == ''):
            for p in glob.glob(searchpath + 'driver_name'):
                with open(p) as f:
                    value = f.read().strip()
                if (value.lower() == _type.lower()):
                    self.sys_path = os.path.dirname(p)
                    # Recover the port letter from 'outX'.
                    self.port = self.port_name[3:]
                    motor_existing = True
                    break
        if (not motor_existing):
            raise NoSuchMotorError(port, _type)

    def stop(self):
        """Send the 'stop' command (behaviour set by stop_command)."""
        self.write_value('command', 'stop')

    def start(self):
        """Send the run command previously chosen by a setup_* method."""
        self.write_value('command', self.mode)

    def reset(self):
        """Reset the motor controller to driver defaults."""
        self.write_value('command', 'reset')

    # setup functions just set up all the values, run calls start (run=1)
    # these are separated so that multiple motors can be started at the same time
    def setup_forever(self, speed_sp, **kwargs):
        """Prepare 'run-forever' mode; extra kwargs set sysfs attributes."""
        self.mode = 'run-forever'
        for k in kwargs:
            v = kwargs[k]
            if (v != None):
                setattr(self, k, v)
        # With regulation on, speed_sp is in tacho counts/s; otherwise
        # the raw duty cycle is used.
        speed_regulation = self.speed_regulation
        if (speed_regulation):
            self.speed_sp = int(speed_sp)
        else:
            self.duty_cycle_sp = int(speed_sp)

    def run_forever(self, speed_sp, **kwargs):
        """setup_forever() followed by start()."""
        self.setup_forever(speed_sp, **kwargs)
        self.start()

    def setup_time_limited(self, time_sp, speed_sp, **kwargs):
        """Prepare 'run-timed' mode for time_sp milliseconds."""
        self.mode = 'run-timed'
        for k in kwargs:
            v = kwargs[k]
            if (v != None):
                setattr(self, k, v)
        speed_regulation = self.speed_regulation
        # NOTE(review): each setpoint is written twice here, once via
        # the property and once via write_value — appears redundant.
        if (speed_regulation):
            self.speed_sp = int(speed_sp)
            self.write_value('speed_sp', speed_sp)
            self.write_value('speed_regulation', 'on')
        else:
            self.duty_cycle_sp = int(speed_sp)
            self.write_value('duty_cycle_sp', speed_sp)
            self.write_value('speed_regulation', 'off')
        self.time_sp = int(time_sp)
        self.write_value('time_sp', time_sp)

    def run_time_limited(self, time_sp, speed_sp, **kwargs):
        """setup_time_limited() followed by start()."""
        self.setup_time_limited(time_sp, speed_sp, **kwargs)
        self.start()

    def setup_position_limited(self, position_sp, speed_sp, absolute=True, **kwargs):
        """Prepare a run to an absolute or relative encoder position.

        Speed regulation is forced on, as required for position runs.
        """
        if absolute == True:
            self.mode = 'run-to-abs-pos'
        else:
            self.mode = 'run-to-rel-pos'
        kwargs['speed_regulation'] = True
        self.write_value('speed_regulation', 'on')
        for k in kwargs:
            v = kwargs[k]
            if (v != None):
                setattr(self, k, v)
        self.speed_sp = int(speed_sp)
        self.write_value('speed_sp', speed_sp)
        self.position_sp = int(position_sp)
        self.write_value('position_sp', position_sp)

    def run_position_limited(self, position_sp, speed_sp, **kwargs):
        """setup_position_limited() followed by start()."""
        self.setup_position_limited(position_sp, speed_sp, **kwargs)
        self.start()
def I2CSMBusProxy(cls):
    """Class decorator: forward every SMBus read_*/write_* method to
    ``self.b`` with ``self.addr`` prepended as the device address.

    If the python-smbus binding is not installed, the class is returned
    unmodified after emitting a warning.
    """
    try:
        from smbus import SMBus
    except ImportError:
        warnings.warn('python-smbus binding not found!')
        return cls

    method_names = [n for n in dir(SMBus)
                    if n.startswith('read') or n.startswith('write')]

    def make_proxy(method):
        # Bind `method` per iteration to avoid the late-binding trap.
        def proxied(self, *args, **kwargs):
            return getattr(self.b, method)(self.addr, *args, **kwargs)
        return proxied

    for name in method_names:
        setattr(cls, name, make_proxy(name))
    return cls
@I2CSMBusProxy
class I2CS(object):
    """An I2C sensor on EV3 input port *port* at bus address *addr*.

    EV3 input port N maps to Linux i2c bus N+2.  All SMBus
    read_*/write_* methods are provided by I2CSMBusProxy, which fills
    in *addr* automatically.  Raises NoSuchSensorError if the i2c
    device node is absent and NoSuchLibraryError without python-smbus.
    """

    def __init__(self, port, addr):
        self.port = port
        # Input port N is exposed as /dev/i2c-(N+2) on the EV3.
        self.i2c_port = port + 2
        self.sys_path = '/dev/i2c-%s' % self.i2c_port
        if (not os.path.exists(self.sys_path)):
            raise NoSuchSensorError(port)
        try:
            from smbus import SMBus
            self.b = SMBus(self.i2c_port)
            self.addr = addr
        except ImportError:
            raise NoSuchLibraryError('smbus')

    def read_byte_array(self, reg, _len):
        """Read _len consecutive registers starting at *reg*.

        The proxied read_byte_data supplies the device address, so only
        the register offset is passed here.
        """
        return [self.read_byte_data(reg + r) for r in range(_len)]

    def read_byte_array_as_string(self, reg, _len):
        """Read _len registers and join them into an ASCII string."""
        return ''.join(chr(r) for r in self.read_byte_array(reg, _len))
class create_i2c_property(object):
    """Class decorator factory mapping names to I2C register accessors.

    Each keyword is either ``name=reg_address`` (read-only property) or
    ``name=(reg_address, {'read_only': False, ...})``.  The generated
    property reads/writes the register via the SMBus proxy methods.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __call__(self, cls):
        for name, reg_address_and_read_only in self.kwargs.items():
            # i2c_property is invoked immediately, so `reg` is bound
            # per-property (no late-binding closure issue).
            def i2c_property(reg, read_only=True):
                def fget(self):
                    return self.read_byte_data(reg)

                def fset(self, value):
                    return self.write_byte_data(reg, value)
                return property(fget, None if read_only else fset)
            # Bare int -> read-only register; tuple -> (reg, options).
            if (type(reg_address_and_read_only) == int):
                prop = i2c_property(reg_address_and_read_only)
            else:
                prop = i2c_property(
                    reg_address_and_read_only[0], **reg_address_and_read_only[1])
            setattr(cls, name, prop)
        return cls
@create_ev3_property(
    brightness={'read_only': False, 'property_type': Ev3IntType},
    max_brightness={'read_only': True, 'property_type': Ev3IntType},
    trigger={'read_only': False},
    delay_on={'read_only': False, 'property_type': Ev3IntType},
    delay_off={'read_only': False, 'property_type': Ev3IntType}
)
class LEDLight(Ev3Dev):
    """A single LED exposed under /sys/class/leds/<light_path>."""

    def __init__(self, light_path):
        # Fix: the original called super(Ev3Dev, self).__init__(), which
        # resolves past Ev3Dev to object and silently skips
        # Ev3Dev.__init__ entirely.
        super(LEDLight, self).__init__()
        self.sys_path = '/sys/class/leds/' + light_path
class LEDSide (object):
    """One side (left/right) of the EV3 status LED pair.

    Each side combines a red and a green LEDLight; mixing their
    brightnesses produces the preset colours in LED.COLOR.
    """

    def __init__(self, left_or_right):
        # left_or_right is 'left' or 'right', matching the sysfs names.
        self.green = LEDLight('ev3-%s1:green:ev3dev' % left_or_right)
        self.red = LEDLight('ev3-%s0:red:ev3dev' % left_or_right)
        # Last colour requested; restored by on().
        self._color = (0, 0)

    @property
    def color(self):
        """LED color (RED, GREEN), where RED and GREEN are integers
        between 0 and 255."""
        return self._color

    @color.setter
    def color(self, value):
        assert len(value) == 2
        assert 0 <= value[0] <= self.red.max_brightness
        assert 0 <= value[1] <= self.green.max_brightness
        # Setting the brightness attributes lights the LEDs immediately.
        self._color = (
            self.red.brightness, self.green.brightness) = tuple(value)

    def blink(self, color=(0, 0), **kwargs):
        """Blink the current (or given) colour using the kernel 'timer'
        trigger; kwargs (e.g. delay_on/delay_off) tune the period."""
        if (color != (0, 0)):
            self.color = color
        for index, light in enumerate((self.red, self.green)):
            # Only blink channels that are actually lit.
            if (not self._color[index]):
                continue
            light.trigger = 'timer'
            for p, v in kwargs.items():
                setattr(light, p, v)

    def on(self):
        """Stop blinking and restore the last requested colour."""
        self.green.trigger, self.red.trigger = 'none', 'none'
        self.red.brightness, self.green.brightness = self._color

    def off(self):
        """Stop blinking and extinguish both channels (colour is kept
        in _color for a later on())."""
        self.green.trigger, self.red.trigger = 'none', 'none'
        self.red.brightness, self.green.brightness = 0, 0
class LED(object):
    """Convenience namespace for the brick's two status LED sides.

    COLOR holds (red, green) brightness pairs for the preset colours.
    ``left`` and ``right`` are created once at class-definition time
    and shared by all users of this module.
    """

    class COLOR:
        # (red, green) channel brightness pairs.
        NONE = (0, 0)
        RED = (255, 0)
        GREEN = (0, 255)
        YELLOW = (25, 255)
        ORANGE = (120, 255)
        AMBER = (255, 255)
    left = LEDSide('left')
    right = LEDSide('right')
@create_ev3_property(
    tone={'read_only': False},
    mode={'read_only': True},
    volume={'read_only': False, 'property_type': Ev3IntType}
)
class Tone(Ev3Dev):
    """The EV3 speaker: tones are played by writing '<freq> <ms>' to the
    snd-legoev3 sysfs tone attribute."""

    def __init__(self):
        # Fix: the original called super(Ev3Dev, self).__init__(), which
        # resolves past Ev3Dev to object and skips Ev3Dev.__init__.
        super(Tone, self).__init__()
        self.sys_path = '/sys/devices/platform/snd-legoev3'

    def play(self, frequency, milliseconds=1000):
        """Play *frequency* Hz for *milliseconds* ms (default 1 s)."""
        self.tone = '%d %d' % (frequency, milliseconds)

    def stop(self):
        """Silence the speaker."""
        self.tone = '0'
class Lcd(object):
    """The EV3 178x128 1-bit LCD, drawn via PIL and blitted to /dev/fb0.

    Drawing happens on an off-screen PIL image (``draw``/``buffer``);
    update() pushes the buffer to the framebuffer.  Raises
    NoSuchLibraryError if PIL is not installed.
    """

    def __init__(self):
        try:
            from PIL import Image, ImageDraw
            SCREEN_WIDTH = 178
            SCREEN_HEIGHT = 128
            # Framebuffer rows are padded to 32-bit boundaries.
            HW_MEM_WIDTH = int((SCREEN_WIDTH + 31) / 32) * 4
            SCREEN_MEM_WIDTH = int((SCREEN_WIDTH + 7) / 8)
            # NOTE(review): these two lengths are computed but unused.
            LCD_BUFFER_LENGTH = SCREEN_MEM_WIDTH * SCREEN_HEIGHT
            LCD_HW_BUFFER_LENGTH = HW_MEM_WIDTH * SCREEN_HEIGHT
            # 1-bit image sized to the padded hardware row width.
            self._buffer = Image.new(
                "1", (HW_MEM_WIDTH * 8, SCREEN_HEIGHT), "white")
            self._draw = ImageDraw.Draw(self._buffer)
        except ImportError:
            raise NoSuchLibraryError('PIL')

    def update(self):
        """Blit the off-screen buffer to the framebuffer device.

        "1;IR" = 1-bit raw, inverted (framebuffer is 1=dark).
        """
        f = os.open('/dev/fb0', os.O_RDWR)
        os.write(f, self._buffer.tobytes("raw", "1;IR"))
        os.close(f)

    @property
    def buffer(self):
        # The underlying PIL Image (for direct pixel access / paste).
        return self._buffer

    @property
    def draw(self):
        # The PIL ImageDraw bound to the buffer.
        return self._draw

    def reset(self):
        """Clear the off-screen buffer to white (call update() after)."""
        self._draw.rectangle(
            (0, 0) + self._buffer.size, outline='white', fill='white')
class attach_ev3_keys(object):
    """Class decorator factory for the EV3 buttons.

    For every ``key_name=key_code`` keyword it attaches a read-only
    boolean property that polls the current key state, and finally a
    ``CODE`` Enum mapping upper-cased key names to their codes.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __call__(self, cls):
        code_map = {}
        for key_name, key_code in self.kwargs.items():
            # Capture key_code per iteration via the factory argument.
            def make_key_property(code):
                def fget(self):
                    state = self.polling()
                    return self.test_bit(code, state)
                return property(fget)
            setattr(cls, key_name, make_key_property(key_code))
            code_map[key_name.upper()] = key_code
        setattr(cls, 'CODE', Enum(**code_map))
        return cls
import array
import fcntl
# Attach one boolean property per EV3 button (Linux input key codes).
@attach_ev3_keys(
    up=103,
    down=108,
    left=105,
    right=106,
    enter=28,
    backspace=14
)
class Key(object):
    """The EV3 front-panel buttons, read via the EVIOCGKEY ioctl on the
    gpio-keys input device."""

    def __init__(self):
        pass

    def EVIOCGKEY(self, length):
        # Compose the EVIOCGKEY ioctl request number by hand:
        # direction=read (2), size=length, type='E', nr=0x18.
        return 2 << (14 + 8 + 8) | length << (8 + 8) | ord('E') << 8 | 0x18

    def test_bit(self, bit, bytes):
        # bit in bytes is 1 when released and 0 when pressed
        return not bool(bytes[int(bit / 8)] & 1 << bit % 8)

    def polling(self):
        """Snapshot the global key-state bitmap; returns the raw byte
        array, or None if the ioctl failed."""
        KEY_MAX = 0x2ff
        BUF_LEN = int((KEY_MAX + 7) / 8)
        buf = array.array('B', [0] * BUF_LEN)
        with open('/dev/input/by-path/platform-gpio-keys.0-event', 'r') as fd:
            ret = fcntl.ioctl(fd, self.EVIOCGKEY(len(buf)), buf)
        if (ret < 0):
            return None
        else:
            return buf
| evz/python-ev3 | ev3/ev3dev.py | Python | apache-2.0 | 20,034 | [
"Amber"
] | c412a6fa2f17fb5b66268644b7c44835d01d7159f0121e985fe02b9f1cf412dc |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 17:03:46 2015
new script for doing HDCM C1Roll and C2X calibration automatically
it requires the HDCM Bragg is calibrated and the d111 and dBragg in SRXenergy script are up-to-date
converting to be compatible with bluesky, still editing
@author: xf05id1
"""
import SRXenergy
from epics import caget
from epics import caput
from epics import PV
import time
import string
from matplotlib import pyplot
import subprocess
import scipy as sp
import scipy.optimize
import math
import numpy as np
import srxbpm
def hdcm_c1roll_c2x_calibration():
    """Calibrate the SRX HDCM first-crystal roll (C1Roll) and
    second-crystal X (C2X).

    Steps through a list of element absorption edges, records the beam
    centroid on a camera (BPM1 or the endstation microscope) at each
    Bragg angle, then least-squares fits the horizontal drift vs
    sin(Bragg) (-> C1Roll error) and the vertical drift vs
    sin(2*Bragg)/sin(Bragg) (-> crystal gap / C2X).  Results are
    printed and plotted; nothing is moved automatically.

    Requires a calibrated HDCM Bragg axis and up-to-date d111 /
    delta_bragg values on the `energy` object.
    """
    onlyplot = False
    #startTi = False
    usecamera = True
    endstation = False

    numAvg = 10

    print(energy._d_111)
    print(energy._delta_bragg)

    # Geometry / camera scale for the chosen observation point.
    if endstation == False:  #default BPM1
        q=38690.42-36449.42  #distance of observing point to DCM; here observing at BPM1
        camPixel=0.006  #mm
        expotimePV = 'XF:05IDA-BI:1{BPM:1-Cam:1}AcquireTime'
    else:
        q=(62487.5+280)-36449.42  #distance of observing point to DCM; here observing at 28 cm downstream of M3. M3 is at 62.4875m from source
        camPixel=0.00121  #mm
        expotimePV = 'XF:05IDD-BI:1{Mscp:1-Cam:1}AcquireTime'

    if onlyplot == False:
        # Centroid PVs for the selected camera.
        if endstation == True:
            cenxPV= 'XF:05IDD-BI:1{Mscp:1-Cam:1}Stats1:CentroidX_RBV'
            cenyPV= 'XF:05IDD-BI:1{Mscp:1-Cam:1}Stats1:CentroidY_RBV'
        else:
            cenxPV= 'XF:05IDA-BI:1{BPM:1-Cam:1}Stats1:CentroidX_RBV'
            cenyPV= 'XF:05IDA-BI:1{BPM:1-Cam:1}Stats1:CentroidY_RBV'

        bragg_rbv = PV('XF:05IDA-OP:1{Mono:HDCM-Ax:P}Mtr.RBV')
        bragg_val = PV('XF:05IDA-OP:1{Mono:HDCM-Ax:P}Mtr.VAL')

        ctmax = PV('XF:05IDA-BI:1{BPM:1-Cam:1}Stats1:MaxValue_RBV')
        expo_time = PV('XF:05IDA-BI:1{BPM:1-Cam:1}AcquireTime_RBV')

        umot_go = PV('SR:C5-ID:G1{IVU21:1-Mtr:2}Sw:Go')

        #know which edges to go to
        #if startTi == True:
        #    elementList=['Ti', 'Fe', 'Cu', 'Se']
        #else:
        #    elementList=['Se', 'Cu', 'Fe', 'Ti']

        # Scan edges in ascending or descending energy depending on the
        # current Bragg angle, to minimise travel.
        if endstation == False:
            #if dcm_bragg.position > 15:
            if bragg_rbv.get() > 15:
                #elementList=['Ti', 'Cr', 'Fe', 'Cu', 'Se']
                #Ti requires exposure times that would require resetting the
                #threshold in the stats record
                elementList=['Cr', 'Fe', 'Cu', 'Se']
            else:
                #elementList=['Se', 'Cu', 'Fe', 'Cr', 'Ti']
                elementList=['Se', 'Cu', 'Fe', 'Cr']
        else:
            if bragg_rbv.get() > 13:
            #if dcm_bragg.position > 13:
                elementList=['Ti', 'Cr', 'Fe', 'Cu', 'Se']
            else:
                elementList=['Se', 'Cu', 'Fe', 'Cr', 'Ti']

        # Edge energies (keV), undulator harmonics and per-edge camera
        # exposure times (ring-current dependent, see dated notes).
        energyDic={'Cu':8.979, 'Se': 12.658, 'Fe':7.112, 'Ti':4.966, 'Cr':5.989}
        harmonicDic={'Cu':5, 'Se': 5, 'Fe':3, 'Ti':3, 'Cr':3}  #150 mA, 20151007

        #use for camera option
        expotime={'Cu':0.003, 'Fe':0.004, 'Se':0.005, 'Ti':0.015, 'Cr':0.006}  #250 mA, 20161118, BPM1
        #expotime={'Cu':0.005, 'Fe':0.008, 'Se':0.01, 'Ti':0.03, 'Cr':0.0012}  #150 mA, 20151110, BPM1
        #expotime={'Cu':0.1, 'Fe':0.2, 'Se':0.2, 'Cr': 0.3}  #150 mA, 20151007, end-station

        #use for bpm option
        foilDic={'Cu':25.0, 'Se': 0.0, 'Fe':25.0, 'Ti':25}

        centroidX={}
        centroidY={}

        theoryBragg=[]
        dx=[]
        dy=[]

        C2Xval=caget('XF:05IDA-OP:1{Mono:HDCM-Ax:X2}Mtr.VAL')
        C1Rval=caget('XF:05IDA-OP:1{Mono:HDCM-Ax:R1}Mtr.VAL')

        #dBragg=SRXenergy.whdBragg()
        dBragg = energy._delta_bragg

        for element in elementList:
            centroidXSample=[]
            centroidYSample=[]

            print(element)
            E=energyDic[element]
            print('Edge:', E)

            # Move the mono only (C2X tracking disabled during calibration).
            energy.move_c2_x.put(False)
            energy.move(E,wait=True)
        #    energy.set(E)
        #
        #    while abs(energy.energy.position - E) > 0.001 :
        #        time.sleep(1)
            print('done moving energy')

            #BraggRBV, C2X, ugap=SRXenergy.EtoAll(E, harmonic = harmonicDic[element])

            #print BraggRBV
            #print ugap
            #print C2X, '\n'

            #go to the edge

            #ugap_set=PV('SR:C5-ID:G1{IVU21:1-Mtr:2}Inp:Pos')
            #ugap_rbv=PV('SR:C5-ID:G1{IVU21:1-LEnc}Gap')

        #    print 'move undulator gap to:', ugap
            #ivu1_gap.move(ugap)
        #    ugap_set.put(ugap, wait=True)
        #    umot_go.put(0)
        #    time.sleep(10)

        #    while (ugap_rbv.get() - ugap) >=0.01 :
        #        time.sleep(5)
        #    time.sleep(2)

        #    print 'move Bragg to:', BraggRBV
        #    bragg_val.put(BraggRBV, wait= True)
        #    while (bragg_rbv.get() - BraggRBV) >=0.01 :
        #        time.sleep(5)
            #dcm_bragg.move(BraggRBV)
        #    time.sleep(2)

            if usecamera == True:
                # Auto-tune exposure so the peak count sits in 180-200.
                caput(expotimePV, expotime[element])
                while ctmax.get() <= 200:
                    caput(expotimePV, expo_time.get()+0.001)
                    print('increasing exposuring time.')
                    time.sleep(0.6)
                while ctmax.get() >= 180:
                    caput(expotimePV, expo_time.get()-0.001)
                    print('decreasing exposuring time.')
                    time.sleep(0.6)
                print('final exposure time =' + str(expo_time.get()))
                print('final max count =' + str(ctmax.get()))

                #record the centroids on BPM1 camera
                print('collecting positions with', numAvg, 'averaging...')
                for i in range(numAvg):
                    centroidXSample.append(caget(cenxPV))
                    centroidYSample.append(caget(cenyPV))
                    time.sleep(2)
                if endstation == False:
                    centroidX[element] = sum(centroidXSample)/len(centroidXSample)
                else:
                    #centroidX[element] = 2452-sum(centroidXSample)/len(centroidXSample)
                    centroidX[element] = sum(centroidXSample)/len(centroidXSample)
                centroidY[element] = sum(centroidYSample)/len(centroidYSample)

                print(centroidXSample)
                print(centroidYSample)
                #print centroidX, centroidY

                #centroidX[element]=caget(cenxPV)
                #centroidY[element]=caget(cenyPV)
                # Convert pixels to mm at the observation plane.
                dx.append(centroidX[element]*camPixel)
                dy.append(centroidY[element]*camPixel)

                print(centroidX)
                print(centroidY, '\n')
                #raw_input("press enter to continue...")
        #    else:
        #
        #        bpm1_y.move(foilDic[element])
        #        time.sleep(2)
        #        position=bpm1.Pavg(Nsamp=numAvg)
        #        dx.append(position['H'])
        #        dy.append(position['V'])
        #        print dx
        #        print dy

            # True Bragg angle = motor position + calibration offset.
            theoryBragg.append(energy.bragg.position+dBragg)

        #fitting
        #fit centroid x to determine C1roll
        #fit centroid y to determine C2X
        # At the endstation the image axes are swapped relative to BPM1.
        if endstation == True:
            temp=dx
            dx=dy
            dy=temp
            print('C2Xval=', C2Xval)
            print('C1Rval=', C1Rval)
            print('dx=', dx)
            print('dy=', dy)
            print('theoryBragg=', theoryBragg)
    else:
        # onlyplot mode: just re-read the current motor positions
        # (dx/dy/theoryBragg are expected to be supplied manually).
        C1Rval=caget('XF:05IDA-OP:1{Mono:HDCM-Ax:R1}Mtr.VAL')
        C2Xval=caget('XF:05IDA-OP:1{Mono:HDCM-Ax:X2}Mtr.VAL')

    # Linear least-squares fit y = pa[0] + pa[1]*x.
    fitfunc = lambda pa, x: pa[1]*x+pa[0]
    errfunc = lambda pa, x, y: fitfunc(pa,x) - y

    pi=math.pi
    sinBragg=np.sin(np.array(theoryBragg)*pi/180)
    sin2Bragg=np.sin(np.array(theoryBragg)*2*pi/180)
    print('sinBragg=', sinBragg)
    print('sin2Bragg=', sin2Bragg)

    # Horizontal drift vs sin(Bragg) -> C1 roll error.
    guess = [dx[0], (dx[-1]-dx[0])/(sinBragg[-1]-sinBragg[0])]
    fitted_dx, success = sp.optimize.leastsq(errfunc, guess, args = (sinBragg, dx))
    print('dx=', fitted_dx[1], '*singBragg +', fitted_dx[0])

    # Slope/2q converts the linear drift into a roll angle (mrad).
    droll=fitted_dx[1]/2/q*1000  #in mrad
    print('current C1Roll:', C1Rval)
    print('current C1Roll is off:', -droll)
    print('calibrated C1Roll:', C1Rval + droll, '\n')

    # Vertical drift vs sin(2B)/sin(B) -> crystal gap (C2X).
    sin2divBragg = sin2Bragg/sinBragg
    print('sin2divBragg=', sin2divBragg)

    guess = [dy[0], (dy[-1]-dy[0])/(sin2divBragg[-1]-sin2divBragg[0])]
    fitted_dy, success = sp.optimize.leastsq(errfunc, guess, args = (sin2divBragg, dy))
    print('dy=', fitted_dy[1], '*(sin2Bragg/sinBragg) +', fitted_dy[0])
    print('current C2X:', C2Xval)
    print('current C2X corresponds to crystal gap:', fitted_dy[1])

    pyplot.figure(1)
    pyplot.plot(sinBragg, dx, 'b+')
    pyplot.plot(sinBragg, sinBragg*fitted_dx[1]+fitted_dx[0], 'k-')
    pyplot.title('C1Roll calibration')
    pyplot.xlabel('sin(Bragg)')
    if endstation == False:
        pyplot.ylabel('dx at BPM1 (mm)')
    else:
        pyplot.ylabel('dx at endstation (mm)')
    pyplot.show()

    pyplot.figure(2)
    pyplot.plot(sin2divBragg, dy, 'b+')
    pyplot.plot(sin2divBragg, sin2divBragg*fitted_dy[1]+fitted_dy[0], 'k-')
    pyplot.title('C2X calibration')
    pyplot.xlabel('sin(2*Bragg)/sin(Bragg)')
    if endstation == False:
        pyplot.ylabel('dy at BPM1 (mm)')
    else:
        pyplot.ylabel('dy at endstation (mm)')
    pyplot.show()
| NSLS-II-SRX/ipython_ophyd | profile_xf05id1/startup/98-hdcmc1rollc2xcalib.py | Python | bsd-2-clause | 9,969 | [
"CRYSTAL"
] | 11c6a13c248c781a9cb437436c614ac73cdf94ab7543b655e86ec35502d065b2 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Window')
from data_400ms import Fmat_original
def pca(X):
    """Principal component analysis via the covariance method.

    X : 2-D data matrix (one observation per row).  NOTE(review): the
    mean is taken with axis=1 (per-row) and broadcast-subtracted; this
    matches the np.matrix input used downstream — confirm before
    reusing with plain ndarrays.

    Returns (eigenvectors, eigenvalues, mean, centered data, covariance).
    """
    #get dimensions
    num_data,dim = X.shape

    #center data
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract the mean (along columns)
    Mcov = cov(M)

    print 'PCA - COV-Method used'
    val,vec = linalg.eig(Mcov)

    #return the projection matrix, the variance and the mean
    return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
    # Pipeline: PCA-reduce the force-feature matrix, then classify the
    # projected samples with kNN under leave-one-chunk-out cross
    # validation (PyMVPA), and plot the confusion matrix and PC variances.

    Fmat = Fmat_original

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)

    print 'Total_Matrix_Shape:',m_tot,n_tot

    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)

    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total

    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)

    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    W = eigvec_total[:,0:10]
    m_W, n_W = np.shape(W)
    print 'Reduced Dimension Eigenvector Shape:',m_W, n_W

    # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful)
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    i = 0
    while i < length:
        # Per-dimension standard deviation from the covariance diagonal.
        s[i] = sqrt(C[i,i])
        i = i+1
    Z = np.divide(B,s)
    m_Z, n_Z = np.shape(Z)
    print 'Z-Score Shape:', m_Z, n_Z

    #Projected Data:
    Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
    m_Y, n_Y = np.shape(Y.T)
    print 'Transposed Projected Data Shape:', m_Y, n_Y

    #Using PYMVPA
    # Labels: 4 classes x 35 samples; chunks: one object per 5 samples,
    # so NFoldSplitter leaves whole objects out of training.
    PCA_data = np.array(Y.T)
    PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
    PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
    clf = kNN(k=9)
    terr = TransferError(clf)
    ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
    print ds1.samples.shape
    cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
    error = cvterr(ds1)
    print error
    print cvterr.confusion.asstring(description=False)
    figure(1)
    cvterr.confusion.plot(numbers='True')
    #show()

    # Variances
    figure(2)
    title('Variances of PCs')
    stem(range(len(perc_total)),perc_total,'--b')
    axis([-0.3,30.3,0,1.2])
    grid('True')
    show()
| tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Time_Window/test10_cross_validate_categories_400ms.py | Python | mit | 4,371 | [
"Mayavi"
] | 90334323e8bbcc2e651c6a0654755058a4fb87743d4b110dcbfd2f6a14316903 |
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import jsontools
from core import scrapertools
from core import tmdb
from core.item import Item
from megaserver import Client
from platformcode import config, logger, platformtools
# Channel-wide settings: graphics mode flag and colour profile.  The
# "perfil" setting selects one of three text-colour palettes; profiles
# beyond the third disable colouring (empty colour strings).
__modo_grafico__ = config.get_setting('modo_grafico', 'puyasubs')
__perfil__ = config.get_setting('perfil', "puyasubs")

# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

# Base URL of the site this channel scrapes.
host = "https://puya.moe"
def mainlist(item):
    """Build the channel root menu: latest releases, the download
    sections, the torrent list, search and the settings entry."""
    logger.info()
    menu = []

    # Latest releases
    menu.append(Item(channel=item.channel, action="listado", text_color=color1,
                     url=host + "/?cat=4", thumbnail=item.thumbnail,
                     title="Novedades Anime"))
    menu.append(Item(channel=item.channel, action="listado", text_color=color1,
                     url=host + "/?cat=142", thumbnail=item.thumbnail,
                     title="Novedades Doramas"))

    # Section header (no action)
    menu.append(Item(channel=item.channel, action="", title="Descargas", text_color=color2))

    # Download sections
    menu.append(Item(channel=item.channel, action="descargas", text_color=color1,
                     url=host + "/?page_id=25501", thumbnail=item.thumbnail,
                     title="    Descargas Animes y Doramas en proceso"))
    menu.append(Item(channel=item.channel, action="descargas", text_color=color1,
                     url=host + "/?page_id=15388", thumbnail=item.thumbnail,
                     title="    Descargas Animes Finalizados"))
    menu.append(Item(channel=item.channel, action="letra", text_color=color1,
                     url=host + "/?page_id=15388", thumbnail=item.thumbnail,
                     title="    Descargas Animes Finalizados por Letra"))
    menu.append(Item(channel=item.channel, action="descargas", text_color=color1,
                     url=host + "/?page_id=25507", thumbnail=item.thumbnail,
                     title="    Descargas Doramas Finalizados"))
    menu.append(Item(channel=item.channel, action="descargas", text_color=color1,
                     url=host + "/?page_id=25503", thumbnail=item.thumbnail,
                     title="    Descargas Películas y Ovas"))

    # External torrent index and search
    menu.append(Item(channel=item.channel, action="torrents", text_color=color1,
                     url="https://www.frozen-layer.com/buscar/descargas",
                     thumbnail=item.thumbnail, title="Lista de Torrents"))
    menu.append(Item(channel=item.channel, action="search", text_color=color3,
                     url=host + "/?s=", thumbnail=item.thumbnail,
                     title="Buscar anime/dorama/película"))
    menu.append(item.clone(title="Configurar canal", action="configuracion",
                           text_color=color5, folder=False))
    return menu
def configuracion(item):
    """Open the channel settings dialog, then refresh the listing so
    colour/profile changes take effect immediately."""
    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
def search(item, texto):
    """Channel search entry point.

    Appends the URL-encoded query to item.url, flags the item as a
    search ("busqueda") and delegates to listado().  Returns [] on any
    scraping error so the GUI degrades gracefully.
    """
    texto = texto.replace(" ", "+")
    item.url += texto
    item.extra = "busqueda"
    try:
        return listado(item)
    except Exception:
        # Fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; log the full traceback instead of
        # iterating sys.exc_info() tuples line by line.
        import traceback
        logger.error(traceback.format_exc())
        return []
def listado(item):
    """List posts from a category page (or search results when
    item.extra == "busqueda"), one Item per post, with TMDB metadata
    for anime pages and a "next page" entry when pagination exists."""
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    # One bloque per post (entry-title ... </article>).
    bloques = scrapertools.find_multiple_matches(data, '<h2 class="entry-title">(.*?)</article>')
    patron = 'href="([^"]+)".*?>(.*?)</a>.*?(?:<span class="bl_categ">(.*?)|</span>)</footer>'
    for bloque in bloques:
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for url, title, cat in matches:
            thumb = scrapertools.find_single_match(bloque, 'src="([^"]+)"')
            tipo = "tvshow"
            # In search results, keep only anime/dorama/movie categories.
            if item.extra == "busqueda" and cat:
                if "Anime" not in cat and "Dorama" not in cat and "Película" not in cat:
                    continue
            if "Película" in cat or "Movie" in title:
                tipo = "movie"
            # Strip release-group tags and trailing episode markers to
            # get a clean title for the TMDB lookup.
            contenttitle = title.replace("[TeamDragon] ", "").replace("[PuyaSubs!] ", "").replace("[Puya+] ", "")
            contenttitle = scrapertools.find_single_match(contenttitle,
                                                          "(.*?)(?:\s+\[|\s+–|\s+–| Episodio| [0-9]{2,3})")
            # Restrict TMDB matches to Japanese originals.
            filtro_tmdb = {"original_language": "ja"}.items()
            itemlist.append(Item(channel=item.channel, action="findvideos", url=url, title=title, thumbnail=thumb,
                                 contentTitle=contenttitle, show=contenttitle, contentType=tipo,
                                 infoLabels={'filtro': filtro_tmdb}, text_color=color1))
    # Only anime listings and searches get TMDB enrichment here.
    if ("cat=4" in item.url or item.extra == "busqueda") and not item.extra == "novedades":
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
    next_page = scrapertools.find_single_match(data, "<span class='current'>.*? href='([^']+)'")
    if next_page:
        next_page = next_page.replace("&", "&")
        itemlist.append(Item(channel=item.channel, action="listado", url=next_page, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, extra=item.extra, text_color=color2))
    return itemlist
def descargas(item):
    """
    Paginated listing of the direct-download index pages.

    Results are sliced 20 at a time using ``item.pagina`` as the offset; if
    ``item.letra`` is set, only the alphabetical section for that letter is
    scraped.  safelinking.net links are routed to ``extract_safe``, all
    others to ``findvideos``.
    """
    logger.info()
    itemlist = list()
    if not item.pagina:
        item.pagina = 0
    data = httptools.downloadpage(item.url).data
    # the site migrated domains over time; normalize all links to the current one
    data = data.replace("/puya.se/", "/puya.si/").replace("/puya.si/", "/puya.moe/")
    patron = '<li><a href="(%s/\?page_id=\d+|http://safelinking.net/[0-9A-z]+)">(.*?)</a>' % host
    if item.letra:
        # limit matching to this letter's alphabetical section
        bloque = scrapertools.find_single_match(data,
                                                '<li>(?:<strong>|)' + item.letra + '(?:</strong>|)</li>(.*?)</ol>')
        matches = scrapertools.find_multiple_matches(bloque, patron)
    else:
        matches = scrapertools.find_multiple_matches(data, patron)
    for url, title in matches[item.pagina:item.pagina + 20]:
        # clean fansub tags and any bracketed qualifiers from the title
        contenttitle = title.replace("[TeamDragon] ", "").replace("[PuyaSubs!] ", "") \
            .replace("[Puya+] ", "")
        contenttitle = re.sub(r'(\[[^\]]*\])', '', contenttitle).strip()
        # NOTE(review): filtro_tmdb is computed but never used in this function
        filtro_tmdb = {"original_language": "ja"}.items()
        # a trailing " S<n>" marks the season number
        season = scrapertools.find_single_match(contenttitle,' S(\d+)')
        if season:
            contenttitle = contenttitle.replace(" S" + season, "")
        else:
            season = ""
        tipo = "tvshow"
        if "page_id=25503" in item.url:
            # this page id is the movies index
            tipo = "movie"
        action = "findvideos"
        if "safelinking" in url:
            action = "extract_safe"
        itemlist.append(Item(channel=item.channel, action=action, url=url, title=title, contentTitle=contenttitle,
                             show=contenttitle, contentType=tipo, infoLabels={'season':season},
                             text_color=color1))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    if len(matches) > item.pagina + 20:
        pagina = item.pagina + 20
        itemlist.append(Item(channel=item.channel, action="descargas", url=item.url, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, pagina=pagina, letra=item.letra, text_color=color2))
    return itemlist
def letra(item):
    """Build one 'descargas' Item per letter of the page's alphabetical index."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = '<li>(?:<strong>|)([A-z#]{1})(?:</strong>|)</li>'
    return [Item(channel=item.channel, title=match, action="descargas", letra=match, url=item.url,
                 thumbnail=item.thumbnail, text_color=color1)
            for match in scrapertools.find_multiple_matches(data, patron)]
def torrents(item):
    """
    List PuyaSubs torrents hosted on frozen-layer.com, 25 per page.

    POSTs the fixed search form, paginates locally over the matches via
    ``item.pagina``, and once local matches are exhausted follows the site's
    ``rel="next"`` link.
    """
    logger.info()
    itemlist = list()
    if not item.pagina:
        item.pagina = 0
    # fixed frozen-layer search form: query "puyasubs", seeded torrents only
    post = "utf8=%E2%9C%93&busqueda=puyasubs&search=Buscar&tab=anime&con_seeds=con_seeds"
    data = httptools.downloadpage(item.url, post=post).data
    # groups: torrent url, title, size, seeds, leechers
    patron = "<td>.*?href='([^']+)' title='descargar torrent'>.*?title='informacion de (.*?)'.*?<td class='fecha'>.*?<td>(.*?)</td>" \
             ".*?<span class=\"stats\d+\">(\d+)</span>.*?<span class=\"stats\d+\">(\d+)</span>"
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title, size, seeds, leechers in matches[item.pagina:item.pagina + 25]:
        contentTitle = title
        if "(" in contentTitle:
            # drop any parenthesised qualifier from the lookup title
            contentTitle = contentTitle.split("(")[0]
        size = size.strip()
        filtro_tmdb = {"original_language": "ja"}.items()
        title += " [COLOR %s][Seeds:%s[/COLOR]|[COLOR %s]Leech:%s[/COLOR]|%s]" % (
            color4, seeds, color5, leechers, size)
        # the scraped hrefs are site-relative
        url = "https://www.frozen-layer.com" + url
        itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, contentTitle=contentTitle,
                             server="torrent", show=contentTitle, contentType="tvshow", text_color=color1,
                             infoLabels={'filtro': filtro_tmdb}))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    if len(matches) > item.pagina + 25:
        # more local matches remain: paginate within the already-fetched page
        pagina = item.pagina + 25
        itemlist.append(Item(channel=item.channel, action="torrents", url=item.url, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, pagina=pagina, text_color=color2))
    else:
        # otherwise follow the site's own pagination
        next_page = scrapertools.find_single_match(data, 'href="([^"]+)" rel="next"')
        if next_page:
            next_page = "https://www.frozen-layer.com" + next_page
            itemlist.append(Item(channel=item.channel, action="torrents", url=next_page, title=">> Página Siguiente",
                                 thumbnail=item.thumbnail, pagina=0, text_color=color2))
    return itemlist
def findvideos(item):
    """
    Scrape a post page for every download mirror and return one playable
    Item per link found.

    Handles four link families: torrents (frozen-layer / nyaa / anidex),
    direct 1fichier links, Mega links hidden behind the site's encrypted
    ``/enc/`` pages, and safelinking.net protected links.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data2 = data.replace("\n","")
    # subtitle languages, normalized to the addon's naming conventions
    idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?) \[')
    idiomas = idiomas.replace("Español Latino", "Latino").replace("Español España", "Castellano")
    # when both qualities are listed, their page order decides the labels below
    ty = scrapertools.find_single_match(data, '720p: <a href=(.*?)1080p: <a href="')
    if ty:
        calidades = ['720p', '1080p']
    else:
        calidades = ['1080p', '720p']
    # --- torrent links ---
    torrentes = scrapertools.find_multiple_matches(data, '<a href="((?:https://www.frozen-layer.com/descargas[^"]+|https://nyaa.si/view/[^"]+|https://anidex.info/torrent/[^"]+))"')
    if torrentes:
        for i, enlace in enumerate(torrentes):
            title = "Ver por Torrent %s" % idiomas
            if "720p" in data and "1080p" in data2:
                title = "[%s] %s" % (calidades[i], title)
            if "anidex.info" in enlace:
                # anidex: turn the view url into the direct download url
                enlace = enlace.replace("/torrent/", "/dl/")
                itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
            elif "nyaa" in enlace:
                # nyaa: fetch the view page to extract both .torrent and magnet links
                data1 = httptools.downloadpage(enlace).data
                enlace = "https://nyaa.si" + scrapertools.find_single_match(data1, 'a href="(/do[^"]+)')
                itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
                enlace = scrapertools.find_single_match(data1, '<a href="(magnet[^"]+)')
                itemlist.append(item.clone(title=title+"(magnet]", action="play", url=enlace, server="torrent"))
            # NOTE(review): frozen-layer links match the pattern but neither
            # branch above handles them; the commented-out append below
            # suggests they were once added directly -- confirm intent
            #itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
    # --- direct 1fichier links ---
    onefichier = scrapertools.find_multiple_matches(data, '<a href="(https://1fichier.com/[^"]+)"')
    if onefichier:
        for i, enlace in enumerate(onefichier):
            title = "Ver por 1fichier %s" % idiomas
            if "720p" in data and "1080p" in data2:
                try:
                    # more links than qualities would raise IndexError; skip label
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(item.clone(title=title, action="play", url=enlace, server="onefichier"))
    # --- Mega links hidden behind the site's encrypted /enc/ pages ---
    puyaenc = scrapertools.find_multiple_matches(data, '<a href="(%s/enc/[^"]+)"' % host)
    if puyaenc:
        import base64, os, jscrypto
        action = "play"
        for i, enlace in enumerate(puyaenc):
            data_enc = httptools.downloadpage(enlace).data
            # jk: hex-encoded AES key, encryp: base64 ciphertext
            jk, encryp = scrapertools.find_single_match(data_enc, " return '(\d+)'.*?crypted\" VALUE=\"(.*?)\"")
            # NOTE(review): a *random* IV for CBC decryption garbles the first
            # 16-byte block; presumably only the tail of the plaintext (after
            # the '#' split below) matters -- confirm against jscrypto usage
            iv = os.urandom(16)
            jk = base64.b16decode(jk)
            encryp = base64.b64decode(encryp)
            crypto = jscrypto.new(jk, jscrypto.MODE_CBC, iv)
            decryp = crypto.decrypt(encryp).rstrip('\0')
            # replace everything before the first '#' with the mega.nz prefix
            link = decryp.split('#')
            link = decryp.replace(link[0], "https://mega.nz/")
            title = "Ver por Mega %s" % idiomas
            if "720p" in data and "1080p" in data2:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            if "/#F!" in link:
                # folder link: expand it instead of playing directly
                action = "carpeta"
            itemlist.append(item.clone(title=title, action=action, url=link, server="mega"))
    # --- safelinking.net protected links ---
    safelink = scrapertools.find_multiple_matches(data, '<a href="(http(?:s|)://.*?safelinking.net/[^"]+)"')
    domain = ""
    server = ""
    if safelink:
        for i, safe in enumerate(safelink):
            headers = {'Content-Type': 'application/json'}
            hash = safe.rsplit("/", 1)[1]
            post = jsontools.dump({"hash": hash})
            data_sf = httptools.downloadpage("http://safelinking.net/v1/protected", post=post, headers=headers).json
            try:
                for link in data_sf.get("links"):
                    enlace = link["url"]
                    action = "play"
                    if "tinyurl" in enlace:
                        # resolve the shortener via its redirect header
                        header = httptools.downloadpage(enlace, follow_redirects=False).headers
                        enlace = header['location']
                    elif "mega." in enlace:
                        server = "mega"
                        domain = "Mega"
                        if "/#F!" in enlace:
                            action = "carpeta"
                    elif "1fichier." in enlace:
                        server = "onefichier"
                        domain = "1fichier"
                        if "/dir/" in enlace:
                            action = "carpeta"
                    elif "google." in enlace:
                        server = "gvideo"
                        domain = "Gdrive"
                        if "/folders/" in enlace:
                            action = "carpeta"
                    title = "Ver por %s" % domain
                    if idiomas:
                        title += " [Subs: %s]" % idiomas
                    if "720p" in data and "1080p" in data2:
                        try:
                            title = "[%s] %s" % (calidades[i], title)
                        except:
                            pass
                    itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))
            except:
                # best effort: a malformed safelinking response skips this link
                pass
    return itemlist
def carpeta(item):
    """
    Expand a folder/container link into one playable Item per contained file.

    Supports 1fichier directories, Google Drive folders, and Mega folders
    (the fallback branch); the result list is sorted by title.
    """
    logger.info()
    itemlist = list()
    if item.server == "onefichier":
        # 1fichier directory listing: one table row per file
        data = httptools.downloadpage(item.url).data
        patron = '<tr>.*?<a href="([^"]+)".*?>(.*?)</a>.*?<td class="normal">(.*?)</td>'
        matches = scrapertools.find_multiple_matches(data, patron)
        for scrapedurl, scrapedtitle, size in matches:
            scrapedtitle += " (%s) [1fichier]" % size
            itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play",
                                 server="onefichier", text_color=color1, thumbnail=item.thumbnail,
                                 infoLabels=item.infoLabels))
    elif item.server == "gvideo":
        # Google Drive folder: file list is embedded, escaped, in the page JS
        data = httptools.downloadpage(item.url, headers={"Referer": item.url}).data
        patron = "'_DRIVE_ivd'] = '(.*?)'"
        # NOTE(review): `matches` is never used below -- dead code?
        matches = scrapertools.find_single_match(data, patron)
        # NOTE(review): str.decode only exists on Python 2 byte strings; this
        # branch will raise on Python 3 unless `data` is bytes -- confirm
        data = data.decode('unicode-escape')
        data = urllib.unquote_plus(urllib.unquote_plus(data))
        newpatron = ',\["(.*?)",\[".*?,"(.*?)","video'
        newmatches = scrapertools.find_multiple_matches(data, newpatron)
        for url, scrapedtitle in newmatches:
            url = "https://drive.google.com/open?id=%s" % url
            itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=url, action="play",
                                 server="gvideo", text_color=color1, thumbnail=item.thumbnail,
                                 infoLabels=item.infoLabels))
    else:
        # Mega folder: enumerate files through the mega client
        from servers import mega
        check, msg = mega.test_video_exists(item.url)
        if check == False:
            # surface the error message as a non-playable item
            itemlist.append(Item(channel=item.channel, title=msg, url="",
                                 text_color=color1, thumbnail=item.thumbnail,
                                 infoLabels=item.infoLabels))
        else:
            c = Client(url=item.url)
            files = c.get_files()
            c.stop()
            for enlace in files:
                try:
                    file_id = enlace["id"]
                except:
                    # entries without an id (e.g. subfolders) are skipped
                    continue
                itemlist.append(
                    Item(channel=item.channel, title=enlace["name"], url=item.url + "|" + file_id, action="play",
                         server="mega", text_color=color1, thumbnail=item.thumbnail,
                         infoLabels=item.infoLabels))
    itemlist.sort(key=lambda item: item.title)
    return itemlist
def extract_safe(item):
    """
    Resolve a safelinking.net protected link into playable Items.

    POSTs the link hash to the safelinking API and builds one Item per
    returned link; Mega/1fichier folder links are routed to ``carpeta``.
    """
    logger.info()
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")
    itemlist = list()
    hash = item.url.rsplit("/", 1)[1]
    headers = [['Content-Type', 'application/json;charset=utf-8']]
    post = jsontools.dump({"hash": hash})
    data = httptools.downloadpage("http://safelinking.net/v1/protected", post=post, headers=headers).json
    for link in data.get("links"):
        enlace = link["url"]
        domain = link["domain"]
        title = "Ver por %s" % domain
        action = "play"
        # fix: `server` was previously unbound (NameError) when the first
        # link's domain was neither mega nor 1fichier; default to ""
        server = ""
        if "mega" in domain:
            server = "mega"
            if "/#F!" in enlace:
                # folder link: expand it instead of playing directly
                action = "carpeta"
        elif "1fichier" in domain:
            server = "onefichier"
            if "/dir/" in enlace:
                action = "carpeta"
        itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))
    return itemlist
def newest(categoria):
    """
    Entry point for the global "novedades" (latest additions) section.

    Lists the latest episodes (cat=4); the trailing pagination Item, if any,
    is dropped and each Item's contentTitle is set to its full title.
    Returns [] on any failure.
    """
    logger.info()
    item = Item()
    try:
        item.url = host + "/?cat=4"
        item.extra = "novedades"
        itemlist = listado(item)
        # drop the ">> Página Siguiente" pagination item, if present
        if itemlist[-1].action == "listado":
            itemlist.pop()
        for it in itemlist:
            it.contentTitle = it.title
    # Catch the exception so one failing channel does not break the
    # aggregated "novedades" channel
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
| alfa-jor/addon | plugin.video.alfa/channels/puyasubs.py | Python | gpl-3.0 | 19,794 | [
"MOE"
] | beeb0baf0441b1e739d5bcf4c724fd87d928a922030892ab27db6d38194f3994 |
## this script is a post process for lammps nanoindent
# Motivation: find the per-atom force / per-atom pe over a run from dump files.
# Nanoindent output gives the *total* force felt by an indenter (sum over all
# atoms below it), so the per-atom force is not visible in that output.
# This script takes a dump file containing potential energy and force over
# multiple timesteps and extracts per-snapshot statistics:
# output1: potential energy as a function of timesteps
# output2: force as a function of timesteps
import matplotlib.pyplot as plt
import numpy as np
import glob
# Pre-processing: the per-snapshot LAMMPS headers must be removed first.
# The following deletes the "ITEM: TIMESTEP" line plus the 8 lines after it:
'''
sed -i '/ITEM: TIMESTEP/,+8d' dump.test
'''
natoms = 1646 #2023 21054 9126
nsnaps = 601 #no. of snaps or timesteps (*actual timestep in lammps to get the real timesteps); note: 0 is also a step
filename = 'dump.test' # lammps dump file after processing with 'sed' command
flist = glob.glob(filename)
# NOTE(review): if the glob ever matches several files, only the last one's
# data survives this loop -- confirm single-file usage is intended
for f in flist:
    load = np.genfromtxt(f, dtype=float, usecols=(4,7)) #z-coord, fz
    data=np.array(load)
# 3-D array layout: one "storey" per snapshot (timestep = snap * lammps
# timestep), one row per atom, col0 = z coord, col1 = fz
data_split = np.zeros((nsnaps, natoms, 2))
data_split [:] = np.split(data,nsnaps) #equally divides the data into 'nsnaps' equal size along the default axis
# sanity check, e.g.: print data_split[1,:,:]
# arrays for the per-snapshot force and z-position vs. snapshot index
# NOTE(review): both loops run over nsnaps-1 snapshots, silently dropping the
# final snapshot -- confirm whether that is intentional
fz_array = np.zeros((nsnaps-1,2))
z_array = np.zeros((nsnaps-1,2))
for i in range(nsnaps-1):
    fz_array [i,0] = i #snapshot index
    fz_array [i,1] = np.mean(data_split [i,:,1]) #mean fz over all atoms (max is another option)
for i in range(nsnaps-1):
    z_array [i,0] = i #snapshot index
    z_array [i,1] = np.mean(data_split [i,:,0]) #mean z coordinate
#plt.plot(fz_array[:,0],fz_array[:,1])
#plt.xlabel('Time (ps)')
#plt.ylabel('force (eV/A)')
plt.plot(fz_array[:,0],fz_array[:,1])
plt.xlabel('Time (ps)')
plt.ylabel('force_z (eV/A))')
plt.show()
#np.savetxt('H2O_GP3.0_MSD',np.c_[data_avg[:,0],data_avg[:,4]])
| msadat/python-scripts | dump2force-vs-displacement.py | Python | gpl-3.0 | 2,532 | [
"LAMMPS"
] | f8325a88ed3e3633894764632fe3d033731b89a312e34a9dea698d5888ac0971 |
from ... import numpy as np
from .. import logging
import scipy.stats
from collections import OrderedDict
import warnings

# Module-level logger; the NullHandler avoids "no handler" warnings when the
# host application does not configure logging itself.
logger = logging.getLogger('rsdfit.emcee_results')
logger.addHandler(logging.NullHandler())
class EmceeParameter(object):
    """
    Class to hold the fitting result of a single parameter.

    Wraps the MCMC trace of one parameter, with shape ``(nwalkers, niters)``,
    and lazily computes summary statistics -- median, peak, and the 1/2/3
    sigma credible intervals -- always excluding the first `burnin`
    iterations.  Cached statistics are invalidated whenever `burnin` or
    `error_rescaling` change.
    """
    def __init__(self, name, trace, burnin=0):
        """
        Parameters
        ----------
        name : str
            the name of the parameter
        trace : array_like
            the sampled values; shape is (nwalkers, niters)
        burnin : int, optional
            the number of initial iterations to exclude from the statistics
        """
        self.name = name
        self._trace = trace  # shape is (nwalkers, niters)
        self.burnin = int(burnin)

    def __repr__(self):
        # summary: median with the 68% and 95% intervals
        sig1 = self.one_sigma
        sig2 = self.two_sigma
        args = (self.name+":", self.median, sig1[-1], sig1[0], sig2[-1], sig2[0])
        return "<Parameter {:<15s} {:.4g} (+{:.4g} {:.4g}) (+{:.4g} {:.4g})>".format(*args)

    def __str__(self):
        return self.__repr__()

    def _to_str(self, val):
        """Format the parameter with the single value ``val`` (no intervals)."""
        args = (self.name+":", val)
        return "<Parameter {:<15s} {:.4g}>".format(*args)

    @property
    def fiducial(self):
        """
        The fiducial value of the parameter; `None` if it was never set.
        """
        try:
            return self._fiducial
        except AttributeError:
            return None

    @fiducial.setter
    def fiducial(self, val):
        self._fiducial = val

    @property
    def burnin(self):
        """
        The number of iterations to exclude as part of the "burn-in" phase.
        """
        return self._burnin

    @burnin.setter
    def burnin(self, val):
        self._burnin = val
        # all cached statistics depend on the burn-in; invalidate them
        del self.median, self.one_sigma, self.two_sigma, self.three_sigma

    @property
    def error_rescaling(self):
        """
        Factor by which the error intervals are rescaled; defaults to 1.
        """
        try:
            return self._error_rescaling
        except AttributeError:
            return 1

    @error_rescaling.setter
    def error_rescaling(self, val):
        self._error_rescaling = val
        # the cached intervals scale with this factor; invalidate them
        del self.one_sigma, self.two_sigma, self.three_sigma

    @property
    def flat_trace(self):
        """
        The flattened chain, excluding steps that occurred during the
        "burn-in" period.
        """
        return self._trace[:, self.burnin:].flatten()

    @property
    def median(self):
        """
        The median of the trace, i.e., the 50th percentile; cached.
        """
        try:
            return self._median
        except AttributeError:
            self._median = np.percentile(self.flat_trace, 50.)
            return self._median

    @median.deleter
    def median(self):
        if hasattr(self, '_median'): delattr(self, '_median')

    @property
    def mean(self):
        """
        The average value of the chain (after burn-in).
        """
        return self.flat_trace.mean()

    @property
    def peak(self):
        """
        The value of the parameter that gives the peak of the posterior PDF,
        as determined through Gaussian kernel density estimation; cached.
        """
        try:
            return self._peak
        except AttributeError:
            # fix: `scipy.optimize` must be imported explicitly -- the
            # module-level `import scipy.stats` does not guarantee the
            # `optimize` subpackage is accessible as an attribute of `scipy`
            import scipy.optimize
            kern = scipy.stats.gaussian_kde(self.flat_trace)
            self._peak = scipy.optimize.fmin(lambda x: -kern(x), self.median, disp=False)[0]
            return self._peak

    @peak.deleter
    def peak(self):
        if hasattr(self, '_peak'): delattr(self, '_peak')

    def _interval(self, lower, upper):
        """
        Return ``[-(p50 - p_lower), p_upper - p50]``: the (negative) lower
        and (positive) upper offsets of the given percentiles from the
        median, rescaled by `error_rescaling`.
        """
        vals = np.percentile(self.flat_trace, [50., lower, upper])*self.error_rescaling
        return [-(vals[0] - vals[1]), vals[2] - vals[0]]

    @property
    def one_sigma(self):
        """
        The lower and upper one-sigma error intervals, computed from the
        ``50 - 15.86555`` and ``84.13445 - 50`` percentiles; cached.

        Returns
        -------
        lower, upper
            The lower and upper 1-sigma error intervals
        """
        try:
            return self._one_sigma
        except AttributeError:
            self._one_sigma = self._interval(15.86555, 84.13445)
            return self._one_sigma

    @one_sigma.deleter
    def one_sigma(self):
        if hasattr(self, '_one_sigma'): delattr(self, '_one_sigma')

    @property
    def two_sigma(self):
        """
        The lower and upper two-sigma error intervals, computed from the
        ``50 - 2.2775`` and ``97.7225 - 50`` percentiles; cached.

        Returns
        -------
        lower, upper
            The lower and upper 2-sigma error intervals
        """
        try:
            return self._two_sigma
        except AttributeError:
            self._two_sigma = self._interval(2.2775, 97.7225)
            return self._two_sigma

    @two_sigma.deleter
    def two_sigma(self):
        if hasattr(self, '_two_sigma'): delattr(self, '_two_sigma')

    @property
    def three_sigma(self):
        """
        The lower and upper three-sigma error intervals, computed from the
        ``50 - 0.135`` and ``99.865 - 50`` percentiles; cached.

        Returns
        -------
        lower, upper
            The lower and upper 3-sigma error intervals
        """
        try:
            return self._three_sigma
        except AttributeError:
            self._three_sigma = self._interval(0.135, 99.865)
            return self._three_sigma

    @three_sigma.deleter
    def three_sigma(self):
        if hasattr(self, '_three_sigma'): delattr(self, '_three_sigma')

    def trace(self, niter=None):
        """
        Return the sampled values: the full ``(nwalkers, niters)`` trace when
        ``niter`` is `None`, otherwise the values of all walkers at iteration
        ``niter``.
        """
        if niter is None:
            return self._trace
        else:
            return self._trace[:,niter]

    @property
    def stderr(self):
        """
        The one-sigma standard error, averaging the lower and upper bounds.
        """
        one_sigma = self.one_sigma
        return 0.5*(abs(one_sigma[0]) + one_sigma[1])
class EmceeResults(object):
"""
Class to hold the fitting results from an `emcee` MCMC run
"""
    def __init__(self, sampler, fit_params, burnin=None, **meta):
        """
        Initialize with the `emcee` sampler and the fitting parameters.

        Parameters
        ----------
        sampler : emcee.EnsembleSampler
            sampler providing ``chain`` with shape (nwalkers, niters, ndim)
            and ``lnprobability`` with shape (nwalkers, niters)
        fit_params : ParameterSet
            the fitting parameters, supplying the free/constrained names,
            the constraints, and the fiducial values
        burnin : int, optional
            iterations to treat as burn-in; if `None`, defaults to 3x the
            maximum autocorrelation time (or 10% of the iterations when that
            is NaN)
        **meta :
            extra meta-data, stored in the :attr:`attrs` dict
        """
        # store the parameter names
        self.free_names = fit_params.free_names
        self.constrained_names = fit_params.constrained_names

        # chain -- keep only the filled-in entries (presumably the sampler
        # arrays are pre-allocated with zeros; TODO confirm)
        (walkers, _, ndim) = sampler.chain.shape
        inds = np.nonzero(sampler.chain)
        self.chain = sampler.chain[inds].reshape((walkers, -1, ndim)) # (nwalkers, niters, npars)

        # lnprob, trimmed the same way
        inds = np.nonzero(sampler.lnprobability)
        self.lnprobs = sampler.lnprobability[inds].reshape((self.walkers, -1)) # (nwalkers, niters)

        # other sampler attributes
        self.acceptance_fraction = sampler.acceptance_fraction
        try:
            self.autocorr_times = sampler.acor
        except:
            # `acor` can fail (e.g. chain too short); fall back to zeros
            self.autocorr_times = np.zeros(len(self.free_names))

        # make the constrained chain
        self._make_constrained_chain(fit_params)

        # make result params
        self._save_results()

        # save fiducial values
        for name in self:
            self[name].fiducial = fit_params[name].fiducial

        # set the burnin
        if burnin is None:
            max_autocorr = 3*np.amax(self.autocorr_times)
            burnin = int(max_autocorr) if not np.isnan(max_autocorr) else int(0.1*self.iterations)
            logger.info("setting the burnin period to {} iterations".format(burnin))
        self.burnin = int(burnin)

        # save meta-data
        self.attrs = OrderedDict(**meta)
    def to_npz(self, filename, **meta):
        """
        Save the relevant information of the class to a numpy ``npz`` file.

        Parameters
        ----------
        filename : str
            the name of the output ``npz`` file
        **meta :
            extra meta-data merged into :attr:`attrs` before saving
        """
        atts = ['free_names', 'constrained_names', 'chain', 'lnprobs',
                'acceptance_fraction', 'autocorr_times','constrained_chain',
                'burnin', 'attrs']
        self.attrs.update(**meta)
        d = {k:getattr(self, k) for k in atts}
        # version info may be absent on older results; default to None
        for k in ['model_version', 'pyrsd_version']:
            d[k] = getattr(self, k, None)
        np.savez(filename, **d)
    @classmethod
    def from_npz(cls, filename):
        """
        Load a numpy ``npz`` file and return the corresponding
        ``EmceeResults`` object.
        """
        # bypass __init__ (which needs a live sampler); restore attributes below
        toret = cls.__new__(cls)
        with np.load(filename, encoding='latin1') as ff:
            for k, v in ff.items():
                # burnin/attrs need special handling after the raw arrays
                if k == 'burnin' or k == 'attrs':
                    continue
                setattr(toret, k, v)
            toret._save_results()
            toret.burnin = int(ff['burnin'])
            if 'attrs' in ff:
                # attrs was stored as a 0-d object array; .tolist() unwraps it
                toret.attrs = ff['attrs'].tolist()
            else:
                toret.attrs = OrderedDict()
        # npz stores these as arrays; restore plain lists of names
        toret.free_names = list(toret.free_names)
        toret.constrained_names = list(toret.constrained_names)
        return toret
    def __iter__(self):
        # iterate over all parameter names: free first, then constrained
        return iter(self.free_names + self.constrained_names)

    def to_str(self, max_lnprob=False):
        """
        Return a string summary of all parameters; if ``max_lnprob`` is True,
        quote the values at the maximum-probability sample instead of the
        median and credible intervals.
        """
        if not max_lnprob:
            return self.__str__()
        else:
            free_vals = self.max_lnprob_values()
            free_params = [self[name]._to_str(v) for name, v in zip(self.free_names, free_vals)]
            constrained_vals = self.max_lnprob_constrained_values()
            constrained_params = [self[name]._to_str(v) for name, v in zip(self.constrained_names, constrained_vals)]

            # first get the parameters
            toret = "Free parameters\n" + "_"*15 + "\n"
            toret += "\n".join(free_params)

            toret += "\n\nConstrained parameters\n" + "_"*22 + "\n"
            toret += "\n".join(constrained_params)
            return toret

    def __str__(self):
        free_params = [self[name] for name in self.free_names]
        constrained_params = [self[name] for name in self.constrained_names]

        # first get the parameters
        toret = "Free parameters [ median (+/-68%) (+/-95%) ]\n" + "_"*15 + "\n"
        toret += "\n".join(map(str, free_params))

        toret += "\n\nConstrained parameters [ median (+/-68%) (+/-95%) ]\n" + "_"*22 + "\n"
        toret += "\n".join(map(str, constrained_params))
        return toret

    def __repr__(self):
        N = len(self.constrained_names)
        return "<EmceeResults: {} free parameters, {} constrained parameters>".format(self.ndim, N)

    def __getitem__(self, key):
        # dict-like access: a parameter name returns its EmceeParameter;
        # any other key falls back to attribute access
        if key in (self.free_names + self.constrained_names):
            return self._results[key]
        else:
            return getattr(self, key)
    def verify_param_ordering(self, free_params, constrained_params):
        """
        Verify the ordering of `EmceeResults.chain`, making sure that the
        chains have the ordering specified by `free_params` and
        `constrained_params`.

        Raises
        ------
        ValueError
            if the free or constrained name sets do not match exactly
        """
        if sorted(self.free_names) != sorted(free_params):
            raise ValueError("mismatch in `EmceeResults` free parameters")
        if sorted(self.constrained_names) != sorted(constrained_params):
            raise ValueError("mismatch in `EmceeResults` constrained parameters")

        reordered = False
        # reorder the columns of `self.chain` to follow `free_params`
        if self.free_names != free_params:
            inds = [self.free_names.index(k) for k in free_params]
            self.chain = self.chain[...,inds]
            reordered = True

        # reorder self.constrained_chain -- no data movement needed, since it
        # is a structured array indexed by field name; only the name list changes
        if self.constrained_names != constrained_params:
            reordered = True

        if reordered:
            self.free_names = free_params
            self.constrained_names = constrained_params
            self._save_results()  # rebuild the EmceeParameter objects
    def __add__(self, other):
        """
        Add two `EmceeResults` objects together by concatenating their chains
        along the iteration axis; both must have the same number of walkers
        and the same parameter sets.
        """
        if not isinstance(other, self.__class__):
            raise NotImplementedError("Can only add two `EmceeResults` objects together")

        # check a few things first
        if self.walkers != other.chain.shape[0]:
            raise ValueError("Cannot add `EmceeResults` objects: mismatch in number of walkers")

        # copy to return
        toret = self.copy()

        # verify the ordering of the other one
        other.verify_param_ordering(self.free_names, self.constrained_names)

        # add the chains together (axis 1 = iterations)
        toret.chain = np.concatenate((self.chain, other.chain), axis=1)

        # concatenate the structured constrained chains field by field
        tmp = np.empty((toret.walkers, toret.iterations), dtype=self.constrained_chain.dtype)
        for name in tmp.dtype.names:
            tmp[name][:] = np.concatenate([self.constrained_chain[name], other.constrained_chain[name]], axis=1)[:]
        toret.constrained_chain = tmp

        # add the log probs together
        toret.lnprobs = np.concatenate((self.lnprobs, other.lnprobs), axis=1)

        # update the new EmceeParameters
        toret._save_results()
        return toret

    def __radd__(self, other):
        return self.__add__(other)

    def copy(self):
        """
        Return a deep copy of the `EmceeResults` object.
        """
        import copy
        return copy.deepcopy(self)
    def _make_constrained_chain(self, fit_params):
        """
        Make the chain for the constrained parameters.

        For every (walker, iteration) sample, the free-parameter values are
        pushed into ``fit_params``, the constraints are re-evaluated, and the
        resulting constrained values are stored in a structured array of
        shape ``(nwalkers, niters)`` with one field per constrained name.
        Sets :attr:`constrained_chain` to `None` when there are no
        constrained parameters.
        """
        if len(self.constrained_names) == 0:
            self.constrained_chain = None
            return

        # make the constrained chain from the other chain
        shape = (self.walkers, self.iterations)
        self.constrained_chain = np.empty(shape, dtype=fit_params.constrained_dtype)
        for niter in range(self.iterations):
            for nwalker, theta in enumerate(self.chain[:,niter,:]):

                # set the free parameters
                for val, name in zip(theta, self.free_names):
                    fit_params[name].value = val

                # update constraints
                fit_params.update_values()

                # set the constrained vals
                constrained_vals = fit_params.constrained_values
                for i, name in enumerate(self.constrained_names):
                    self.constrained_chain[name][nwalker, niter] = constrained_vals[i]

        # # check for any constrained values that are fixed and remove
        # tocat = ()
        # names = []
        # for i in range(shape[-1]):
        #     trace = self.constrained_chain[...,i]
        #     fixed = len(np.unique(trace)) == 1
        #     if not fixed:
        #         tocat += (trace[...,None],)
        #         names.append(self.constrained_names[i])
        #
        # self.constrained_names = names
        # self.constrained_chain = np.concatenate(tocat, axis=2)
def _save_results(self):
"""
Make the dictionary of `EmceeParameters`
"""
self._results = {}
# the free parameters
for i, name in enumerate(self.free_names):
self._results[name] = EmceeParameter(name, self.chain[...,i])
# the constrained parameters
if self.constrained_chain is not None:
for i, name in enumerate(self.constrained_names):
self._results[name] = EmceeParameter(name, self.constrained_chain[name])
    #---------------------------------------------------------------------------
    # some convenience attributes
    #---------------------------------------------------------------------------
    @property
    def iterations(self):
        """
        The number of iterations performed, as computed from the chain.
        """
        return self.chain.shape[1]

    @property
    def walkers(self):
        """
        The number of walkers, as computed from the chain.
        """
        return self.chain.shape[0]

    @property
    def ndim(self):
        """
        The number of free parameters.
        """
        return len(self.free_names)

    @property
    def burnin(self):
        """
        The number of iterations to treat as part of the "burnin" period,
        where the chain hasn't stabilized yet.
        """
        return self._burnin

    @burnin.setter
    def burnin(self, val):
        # a burn-in longer than the chain makes no sense; fall back to 0
        if val > self.iterations: val = 0
        self._burnin = val
        # propagate to each parameter so its cached statistics refresh
        for param in self._results:
            self._results[param].burnin = val

    @property
    def error_rescaling(self):
        """
        Rescale error on parameters due to covariance matrix from mocks;
        defaults to 1.
        """
        try:
            return self._error_rescaling
        except:
            return 1.

    @error_rescaling.setter
    def error_rescaling(self, val):
        self._error_rescaling = val
        # propagate to each parameter so its cached intervals refresh
        for param in self._results:
            self._results[param].error_rescaling = val
    @property
    def max_lnprob(self):
        """
        The value of the maximum log probability.
        """
        return self.lnprobs.max()

    def max_lnprob_values(self):
        """
        Return the value of the free parameters at the iteration with the
        maximum probability, as a 1-D array of length `ndim`.
        """
        nwalker, niter = np.unravel_index(self.lnprobs.argmax(), self.lnprobs.shape)
        return self.chain[nwalker, niter, :]

    def max_lnprob_constrained_values(self):
        """
        Return the value of the constrained parameters at the iteration
        with the maximum probability.
        """
        nwalker, niter = np.unravel_index(self.lnprobs.argmax(), self.lnprobs.shape)
        # a structured-array record; .tolist() converts it to a plain tuple
        return np.array(self.constrained_chain[nwalker, niter].tolist())
    def values(self):
        """
        Convenience function to return the median values for the free
        parameters as an array.
        """
        return np.array([self[name].median for name in self.free_names])

    def constrained_values(self):
        """
        Convenience function to return the median values for the constrained
        parameters as an array.
        """
        return np.array([self[name].median for name in self.constrained_names])

    def peak_values(self):
        """
        Convenience function to return the KDE peak values for the free
        parameters as an array.
        """
        return np.array([self[name].peak for name in self.free_names])

    def peak_constrained_values(self):
        """
        Convenience function to return the KDE peak values for the
        constrained parameters as an array.
        """
        return np.array([self[name].peak for name in self.constrained_names])
    def plot_timeline(self, *names, **kwargs):
        """
        Plot the chain timeline for as many parameters as specified in the
        `names` tuple.

        This plots the value of each walker as a function of iteration.

        Notes
        ------
        Any iterations during the "burnin" period are excluded

        Parameters
        ----------
        names : tuple
            The string names of the parameters to plot
        outfile : str, optional
            If not `None`, save the resulting figure with the specified name

        Returns
        -------
        fig : matplotlib.Figure
            The figure object
        """
        from matplotlib import pyplot as plt
        from matplotlib.ticker import MaxNLocator
        from ..analysis import tex_names

        outfile = kwargs.get('outfile', None)
        N = len(names)
        if N < 1:
            raise ValueError('Must specify at least one parameter name for timeline plot')

        fig, axes = plt.subplots(N, 1, sharex=True, figsize=(8, 9))
        # a single subplot comes back unwrapped; normalize to a list
        if N == 1: axes = [axes]
        for i, name in enumerate(names):
            param = self[name]
            iter_num = range(self.iterations)[self.burnin:]
            trace = param.trace()[:, self.burnin:]

            # one faint line per walker, excluding the burnin period
            for t in trace:
                axes[i].plot(iter_num, t, color="k", alpha=0.4)
            axes[i].yaxis.set_major_locator(MaxNLocator(5))
            # mark the post-burnin median
            axes[i].axhline(param.median, color="#888888", lw=2)
            # use the latex label when one is registered
            if name in tex_names:
                name = tex_names[name]
            axes[i].set_ylabel(name)

        axes[-1].set_xlabel('iteration number', fontsize=16)
        if outfile is not None:
            fig.savefig(outfile)
        return fig
    def plot_triangle(self, *names, **kwargs):
        """
        Make a triange plot for the desired parameters using the
        :func:`corner.corner` function.

        Notes
        -----
        Any iterations during the "burnin" period are excluded

        Parameters
        ----------
        names : tuple
            The string names of the parameters to plot
        thin : int, optional
            The factor to thin the number of samples plotted by. Default is
            1 (plot all samples)
        outfile : str, optional
            If not `None`, save the resulting figure with the specified name

        Returns
        -------
        fig : matplotlib.Figure
            The figure object
        """
        from ..analysis import tex_names
        try: import corner
        except: raise ImportError("`corner` must be installed")

        if len(names) < 2:
            raise ValueError('Must specify at least two parameter names to make triangle plot')
        thin = kwargs.pop('thin', 1)
        outfile = kwargs.pop('outfile', None)
        # prefer latex labels where registered
        labels = [tex_names.get(name, name) for name in names]
        kwargs.setdefault('labels', labels)

        # make the sample array for the desired parameters: one flattened,
        # thinned, post-burnin trace per column
        samples = []
        for name in names:
            param = self[name]
            trace = param.trace()[:, self.burnin::thin].flatten()
            samples.append(trace)

        fig = corner.corner(np.vstack(samples).T, **kwargs)
        if outfile is not None:
            fig.savefig(outfile)
        return fig
def jointplot_2d(self, param1, param2,
thin=1,
rename={},
crosshairs={},
**kwargs):
"""
Plot the 2D traces of the given parameters, using KDE via the :func:`seaborn.jointplot` function.
Notes
-----
Any iterations during the "burnin" period are excluded
Parameters
----------
param1 : str
the name of the first parameter
param2 : str
the name of the second parameter
thin : int, optional
thin the plotted array randomly by this amount
rename : dict, optional
dictionary giving a string to rename each variable; default
will try to use any latex names stored
crosshairs : dict, optional
values to show as horizontal or vertical lines
**kwargs :
additional keyword arguments to pass to ``seaborn``
"""
import pandas as pd
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import seaborn as sns
from ..analysis import tex_names
names = self.free_names + self.constrained_names
if not all(name in names for name in [param1, param2]):
raise ValueError("specified parameter names not valid")
# default names
rename.setdefault(param1, tex_names.get(param1, param1))
rename.setdefault(param2, tex_names.get(param2, param2))
# make the pandas Series of the flattened traces
trace1 = self[param1].trace()[:, self.burnin::thin].flatten()
trace1 = pd.Series(trace1, name=rename[param1])
trace2 = self[param2].trace()[:, self.burnin::thin].flatten()
trace2 = pd.Series(trace2, name=rename[param2])
# do the plot
kwargs.setdefault('space', 0)
kwargs.setdefault('size', 7)
with sns.axes_style("ticks"):
g = sns.jointplot(trace1, trace2, kind="kde", **kwargs)
# plot any cross-hairs
ax = g.ax_joint
if param1 in crosshairs:
ax.axvline(x=crosshairs[param1], c="#888888", lw=1.5, alpha=0.4)
if param2 in crosshairs:
ax.axhline(y=crosshairs[param2], c="#888888", lw=1.5, alpha=0.4)
return g
def kdeplot_2d(self, param1, param2,
               thin=1,
               rename=None,
               crosshairs=None,
               **kwargs):
    """
    Plot the 2D traces of the given parameters, using KDE via
    :func:`seaborn.kdeplot`.

    Notes
    -----
    Any iterations during the "burnin" period are excluded.

    Parameters
    ----------
    param1 : str
        the name of the first parameter
    param2 : str
        the name of the second parameter
    thin : int, optional
        thin the plotted array randomly by this amount
    rename : dict, optional
        dictionary giving a string to rename each variable; default
        will try to use any latex names stored
    crosshairs : dict, optional
        values to show as horizontal or vertical lines
    **kwargs :
        additional keyword arguments to pass to ``seaborn``
    """
    import pandas as pd
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        import seaborn as sns
    from ..analysis import tex_names

    # FIX: avoid the shared mutable-default-argument bug of the original
    # and never mutate the caller's dictionaries
    rename = dict(rename) if rename is not None else {}
    crosshairs = crosshairs if crosshairs is not None else {}

    names = self.free_names + self.constrained_names
    if not all(name in names for name in [param1, param2]):
        raise ValueError("specified parameter names not valid")

    # default names: prefer the stored latex name, else the raw name
    rename.setdefault(param1, tex_names.get(param1, param1))
    rename.setdefault(param2, tex_names.get(param2, param2))

    # make the pandas Series of the flattened traces (burnin removed)
    trace1 = self[param1].trace()[:, self.burnin::thin].flatten()
    trace1 = pd.Series(trace1, name=rename[param1])
    trace2 = self[param2].trace()[:, self.burnin::thin].flatten()
    trace2 = pd.Series(trace2, name=rename[param2])

    # do the plot
    kwargs.setdefault('shade', True)
    kwargs.setdefault('shade_lowest', False)
    with sns.axes_style("ticks"):
        ax = sns.kdeplot(trace1, trace2, **kwargs)

    # plot any cross-hairs
    if param1 in crosshairs:
        ax.axvline(x=crosshairs[param1], c="#888888", lw=1.5, alpha=0.4)
    if param2 in crosshairs:
        ax.axhline(y=crosshairs[param2], c="#888888", lw=1.5, alpha=0.4)
    return ax
def summarize_fit(self):
    """
    Summarize the fit, by logging the best-fit statistics together
    with the string representation of the results.
    """
    args = (self.iterations, self.walkers, self.ndim)
    hdr = "Emcee fitting finished: {} iterations, {} walkers, {} free parameters\n".format(*args)

    # best (maximum) log probability over all walkers/iterations
    logp = np.amax(self.lnprobs)
    chi2 = -2. * logp
    # FIX: original used '{:4f}' (field width 4) where '{:.4f}'
    # (4 decimal places, matching the chi2 spec) was intended
    hdr += "Best log likelihood value = {:.4f}, corresponding to chi2 = {:.4f}\n\n".format(logp, chi2)

    # print the results to the logger
    logger.info("\n" + hdr + str(self))
def as_dict(self, kind=None):
    """
    Return a dictionary of the values, either the `median`,
    `peak`, or `max_lnprob`.

    Parameters
    ----------
    kind : {None, 'mean', 'median', 'peak', 'max_lnprob'}, optional
        which statistic of the traces to return; ``None`` behaves
        like 'mean'/'median'

    Returns
    -------
    dict :
        parameter name -> value, for both free and constrained
        parameters
    """
    # select the pair of accessors for (free, constrained) values
    if kind is None or kind in ('mean', 'median'):
        getters = (self.values, self.constrained_values)
    elif kind == 'peak':
        getters = (self.peak_values, self.peak_constrained_values)
    elif kind == 'max_lnprob':
        getters = (self.max_lnprob_values, self.max_lnprob_constrained_values)
    else:
        raise ValueError("`kind` must be one of ['mean', 'median', 'peak', 'max_lnprob']")

    out = {}
    for group, getter in zip((self.free_names, self.constrained_names), getters):
        out.update(zip(group, getter()))
    return out
def chains(self, params=[], labels={}, use_latex=False):
    """
    Return a pandas DataFrame holding the chains (flat traces) as
    columns for both the free and constrained parameters.

    Note that any burnin is removed from the chains.

    Parameters
    ----------
    params : list, {'free', 'constrained'}, optional
        return only a subset of the parameters
    labels : dict, optional
        mapping of parameter name to column label; takes precedence
        over any latex name (read-only, never mutated)
    use_latex : bool, optional
        If `True`, try to use any available latex names for the tick
        labels; default is `False`

    Returns
    -------
    DataFrame :
        the DataFrame holding the flat traces for all parameters
    """
    import pandas as pd
    from ..analysis import tex_names

    # expand the 'free'/'constrained' shorthands; allow a single
    # parameter name to be given as a plain string
    if params == 'free':
        params = self.free_names
    elif params == 'constrained':
        params = self.constrained_names
    elif isinstance(params, str):
        params = [params]

    # build the columns, free parameters first then constrained
    # (the original had two copy-pasted loops; merged into one here)
    d = {}
    for p in self.free_names + self.constrained_names:
        if len(params) and p not in params:
            continue
        # column label priority: explicit label > latex name > raw name
        if p in labels:
            key = labels[p]
        elif use_latex and p in tex_names:
            key = tex_names[p]
        else:
            key = p
        d[key] = self[p].flat_trace
    return pd.DataFrame(d)
def corr(self, params=[], labels={}, use_latex=False):
    """
    Return a pandas DataFrame holding the correlation matrix,
    as computed from the ``chains`` DataFrame, for all
    parameters.

    Parameters
    ----------
    params : list, {'free', 'constrained'}, optional
        return only a subset of the parameters
    labels : dict, optional
        mapping of parameter name to column label
    use_latex : bool, optional
        If `True`, try to use any available latex names for the tick
        labels; default is `False`

    Returns
    -------
    DataFrame (Np, Np) :
        the DataFrame holding the correlation between parameters
    """
    # delegate to ``chains`` and let pandas do the correlation math
    df = self.chains(params=params, labels=labels, use_latex=use_latex)
    return df.corr()
def sorted_1d_corrs(self, params=[], use_latex=False, absval=False):
    """
    Return a pandas DataFrame holding the pairwise parameter
    correlations, one row per unique pair, sorted in descending
    order of the correlation value.

    Parameters
    ----------
    params : list, {'free', 'constrained'}, optional
        return only a subset of the parameters
    use_latex : bool, optional
        If `True`, try to use any available latex names for the
        labels; default is `False`
    absval : bool, optional
        if `True`, sort on the absolute value of the correlations

    Returns
    -------
    DataFrame :
        columns ``['param_1', 'param_2', 'corr']``, sorted descending
    """
    # the 2D correlation matrix
    corr = self.corr(params=params, use_latex=use_latex)
    if absval:
        corr = corr.abs()

    # generate a mask for the upper triangle (including the diagonal)
    # FIX: use the builtin ``bool`` -- the ``np.bool`` alias was
    # deprecated in NumPy 1.20 and removed in NumPy 1.24
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True

    # set the upper triangle (including diag) to NaN so each pair
    # appears exactly once
    corr.values[mask] = np.nan

    # unstack into a Series indexed by (param_1, param_2)
    s = corr.unstack()

    # sort and remove the NaNs
    so = s.sort_values(kind="quicksort", ascending=False)
    so = so[~so.isnull()]

    # reset the index so the pair becomes ordinary columns
    so = so.reset_index()
    so.columns = ['param_1', 'param_2', 'corr']
    return so
def plot_correlation(self, params=[], use_latex=True, labelsize=10):
    """
    Plot the diagonal correlation matrix, using ``seaborn.heatmap``.

    Parameters
    ----------
    params : list, {'free', 'constrained'}, optional
        return only a subset of the parameters
    use_latex : bool, optional
        If `True`, try to use any available latex names for the tick
        labels; default is `True`. You might want to set this to `False`
        if you are trying to use mpld3
    labelsize : int, optional
        font size for the tick labels

    Returns
    -------
    Axes :
        the matplotlib axes holding the heatmap
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        import seaborn as sns

    sns.set(style="white")
    fig, ax = sns.plt.subplots(figsize=(11, 9))

    # Compute the correlation matrix
    # FIX: honor the ``use_latex`` argument (it was hard-coded to True)
    corr = self.corr(params=params, use_latex=use_latex)

    # Generate a mask for the upper triangle
    # FIX: ``np.bool`` was removed from NumPy; use the builtin ``bool``
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True

    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)

    # Draw the heatmap with the mask and correct aspect ratio
    sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
                square=True, linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)

    # format the tick labels on both axes
    # (loop variable renamed: the original reused ``f`` and shadowed
    # the figure handle)
    for tick_func in (sns.plt.xticks, sns.plt.yticks):
        locs, labels = tick_func()
        sns.plt.setp(labels, rotation=45, fontsize=labelsize)
    return ax
| nickhand/pyRSD | pyRSD/rsdfit/results/emcee_results.py | Python | gpl-3.0 | 33,251 | [
"Gaussian"
] | 0bece4091d9b76f03962314c8b48367dbd5e14f947aaefae60e1426ab677e5ef |
from __future__ import unicode_literals
from .common import InfoExtractor
class NhkVodIE(InfoExtractor):
    """Extractor for NHK World VOD pages; playback is delegated to Ooyala."""
    _VALID_URL = r'https?://www3\.nhk\.or\.jp/nhkworld/en/vod/(?P<id>.+?)\.html'
    _TEST = {
        # Videos available only for a limited period of time. Visit
        # http://www3.nhk.or.jp/nhkworld/en/vod/ for working samples.
        'url': 'http://www3.nhk.or.jp/nhkworld/en/vod/tokyofashion/20160815.html',
        'info_dict': {
            'id': 'A1bnNiNTE6nY3jLllS-BIISfcC_PpvF5',
            'ext': 'flv',
            'title': 'TOKYO FASHION EXPRESS - The Kimono as Global Fashion',
            'description': 'md5:db338ee6ce8204f415b754782f819824',
            'series': 'TOKYO FASHION EXPRESS',
            'episode': 'The Kimono as Global Fashion',
        },
        'skip': 'Videos available only for a limited period of time',
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)

        # the Ooyala embed code is the only mandatory piece of information
        embed_code = self._search_regex(
            r'nw_vod_ooplayer\([^,]+,\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
            page, 'ooyala embed code', group='id')

        # the remaining metadata is best-effort only
        episode = self._search_regex(
            r'<div[^>]+class=["\']episode-detail["\']>\s*<h\d+>([^<]+)',
            page, 'title', default=None)
        description = self._html_search_regex(
            r'(?s)<p[^>]+class=["\']description["\'][^>]*>(.+?)</p>',
            page, 'description', default=None)
        series = self._search_regex(
            r'<h2[^>]+class=["\']detail-top-player-title[^>]+><a[^>]+>([^<]+)',
            page, 'series', default=None)

        # display title combines series and episode when both are known
        if series and episode:
            display_title = '%s - %s' % (series, episode)
        else:
            display_title = episode

        return {
            '_type': 'url_transparent',
            'ie_key': 'Ooyala',
            'url': 'ooyala:%s' % embed_code,
            'title': display_title,
            'description': description,
            'series': series,
            'episode': episode,
        }
| jcoady9/youtube-dl | youtube_dl/extractor/nhk.py | Python | unlicense | 1,968 | [
"VisIt"
] | d53a20720374dd45f864ce1d4b5937f8f196d8f2b7a4ebf3469160fe43c249b0 |
# Build a Cytoscape-importable interaction table that links exons spanned
# by common read (CIGAR) intervals of a single transcript.
import pandas as pd
import pickle

# input/output locations and file-name templates for the transcript
data_dir = "data/"
gene_id = "uc021pzf.2"
gene_file_suffix = ".gtf"
gene_file = "refGene-chr10-{}-exons".format(gene_id)
cigar_pickle_file = "transcript-{}-cigarIntervals.pickle".format(gene_id)
cyto_file = gene_file + ".cyto"

# load read intervals from serialized file
with open(data_dir + cigar_pickle_file, mode='br') as f:
    intervals = pickle.load(f)

# load exons from .GTF
exons = {}
data = pd.read_table(data_dir + gene_file + gene_file_suffix, sep="\t")
first = max(data.index)
# create dictionary with keys as exon names and values with starting and ending position of exon
# gene is on "-" strand so first exon is with highest starting position
# {"exon_XX": (start, end)}
for index, row in data.iterrows():
    exons["exon_{:02d}".format(first)] = (int(row["start"]), int(row["end"]))
    first -= 1

joined_exons = {}
# iterate through read intervals and add connected exons as keys and their counts to dictionary
# keys are exon tuples: e.g. (exon2, exon5)
for i in intervals:
    lastExon = -1  # sentinel: no exon matched yet for this read
    for ii in i:
        for exon in sorted(exons.keys()):
            # overlap test: interval covers exon start, covers exon end,
            # interval lies inside the exon, or exon lies inside the interval
            if (ii[0] <= exons[exon][0] < ii[1]) or (ii[0] < exons[exon][1] <= ii[1]) or \
                    (exons[exon][0] <= ii[0] <= ii[1] <= exons[exon][1]) or (ii[0] <= exons[exon][0] <= exons[exon][1] <= ii[1]):
                if (lastExon, exon) not in joined_exons.keys():
                    # only start counting a link between two *different* exons
                    if lastExon != exon and lastExon != -1:
                        joined_exons[(lastExon, exon)] = 1
                else:
                    joined_exons[(lastExon, exon)] += 1
                # chain: the current exon becomes the link source for the next match
                lastExon = exon

# write table for cytoscape
with open(cyto_file, mode="w") as f:
    f.write("NODE_1\tNODE_2\tINTERACTION\tSUM_LINKS\n")
    # NOTE(review): this loop variable shadows the ``exons`` dict above;
    # harmless here because the dict is no longer needed, but confusing
    for exons in joined_exons.keys():
        f.write("{}\t{}\tpd\t{}\n".format(exons[0], exons[1], joined_exons[exons]))
"Cytoscape"
] | 2de33796ba9bae6773b922f7f9a6894c652e7398a8b8156e377036a4e35c2e19 |
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
NAME = 'ZenPacks.Imas.SmsFunctions'
VERSION = '1.0'
AUTHOR = 'Wouter D\'Haeseleer'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.Imas']
PACKAGES = ['ZenPacks', 'ZenPacks.Imas', 'ZenPacks.Imas.SmsFunctions']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=2.2'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
    # This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page. Whenever the edit page is submitted it will
    # overwrite the values below (the ones it knows about) with new values.
    name = NAME,
    version = VERSION,
    author = AUTHOR,
    license = LICENSE,

    # This is the version spec which indicates what versions of Zenoss
    # this ZenPack is compatible with
    compatZenossVers = COMPAT_ZENOSS_VERS,

    # previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed.  If no ZenPack with the current name is
    # installed then a zenpack of this name if installed will be upgraded.
    prevZenPackName = PREV_ZENPACK_NAME,

    # Indicate to setuptools which namespace packages the zenpack
    # participates in
    namespace_packages = NAMESPACE_PACKAGES,

    # Tell setuptools what packages this zenpack provides.
    packages = find_packages(),

    # Tell setuptools to figure out for itself which files to include
    # in the binary egg when it is built.
    include_package_data = True,

    # Tell setuptools what non-python files should also be included
    # with the binary egg.
    # FIX: the original dict literal repeated the '' key, so the '*.txt'
    # patterns were silently discarded by the second entry; the patterns
    # are merged under a single key here.
    package_data = {
        '': ['*.txt', '../COPYRIGHT.txt', '../LICENSE.txt'],
        NAME: ['objects/*', 'skins/*/*', 'services/*', 'reports/*/*',
               'modeler/*/*', 'daemons/*', 'lib/*', 'libexec/*', 'php/*'],
    },

    # Indicate dependencies on other python modules or ZenPacks. This line
    # is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified to
    # dramatically.
    install_requires = INSTALL_REQUIRES,

    # Every ZenPack egg must define exactly one zenoss.zenpacks entry point
    # of this form.
    entry_points = {
        'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
    },

    # All ZenPack eggs must be installed in unzipped form.
    zip_safe = False,
)
| zenoss/Community-Zenpacks | ZenPacks.Imas.SmsFunctions/setup.py | Python | gpl-2.0 | 3,276 | [
"VisIt"
] | 7973017ffd1404895824d168afc98e88516e80c64c0c9d2e41d0d800d8c9d9ab |
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import unittest
import wradlib as wrl
from wradlib.io import dem
from wradlib.io import radolan
from wradlib.io import rainbow
from wradlib.io import CfRadial, OdimH5, create_xarray_dataarray
from wradlib.georef import epsg_to_osr
from subprocess import check_call
import numpy as np
import xarray as xr
import zlib
import gzip
import tempfile
import os
import datetime
import io
import sys
class DXTest(unittest.TestCase):
    """Tests for the DX-format helpers in ``wradlib.io.radolan``."""

    # testing functions related to read_dx
    def test__get_timestamp_from_filename(self):
        """Timestamps parse from both 4-digit- and 2-digit-year filenames."""
        filename = 'raa00-dx_10488-200608050000-drs---bin'
        self.assertEqual(radolan._get_timestamp_from_filename(filename),
                         datetime.datetime(2006, 8, 5, 0))
        filename = 'raa00-dx_10488-0608050000-drs---bin'
        self.assertEqual(radolan._get_timestamp_from_filename(filename),
                         datetime.datetime(2006, 8, 5, 0))

    def test_get_dx_timestamp(self):
        """The public timestamp helper returns a timezone-aware string."""
        filename = 'raa00-dx_10488-200608050000-drs---bin'
        self.assertEqual(radolan.get_dx_timestamp(filename).__str__(),
                         '2006-08-05 00:00:00+00:00')
        filename = 'raa00-dx_10488-0608050000-drs---bin'
        self.assertEqual(radolan.get_dx_timestamp(filename).__str__(),
                         '2006-08-05 00:00:00+00:00')

    def test_parse_dx_header(self):
        """A raw DX header byte string parses without raising."""
        header = (b'DX021655109080608BY54213VS 2CO0CD2CS0EP0.30.30.40.50.'
                  b'50.40.40.4MS999~ 54( 120, 46) 43-31 44 44 50 50 54 52 '
                  b'52 42 39 36 ~ 53( 77, 39) 34-31 32 44 39 48 53 44 45 '
                  b'35 28 28 ~ 53( 98, 88)-31-31-31 53 53 52 53 53 53 32-31'
                  b' 18 ~ 57( 53, 25)-31-31 41 52 57 54 52 45 42 34 20 20 '
                  b'~ 55( 37, 38)-31-31 55 48 43 39 50 51 42 15 15 5 ~ '
                  b'56( 124, 19)-31 56 56 56 52 53 50 50 41 44 27 28 ~ '
                  b'47( 62, 40)-31-31 46 42 43 40 47 41 34 27 16 10 ~ '
                  b'46( 112, 62)-31-31 30 33 44 46 46 46 46 33 38 23 ~ '
                  b'44( 100, -54)-31-31 41 41 38 44 43 43 28 35 30 6 ~ '
                  b'47( 104, 75)-31-31 45 47 38 41 41 30 30 15 15 8 ^ '
                  b'58( 104, -56) 58 58 58 58 53 37 37 9 15-31-31-31 ^ '
                  b'58( 123, 16) 56-31 58 58 46 52 49 35 44 14 32 0 ^ '
                  b'57( 39, 38)-31 55 53 57 55 27 29 18 11 1 1-31 ^ '
                  b'54( 100, 85)-31-31 54 54 46 50-31-31 17-31-31-31 ^ '
                  b'53( 71, 39)-31-31 46 53 52 34 34 40 32 32 23 0 ^ '
                  b'53( 118, 49)-31-31 51 51 53 52 48 42 39 29 24-31 ` '
                  b'28( 90, 43)-31-31 27 27 28 27 27 19 24 19 9 9 ` '
                  b'42( 114, 53)-31-31 36 36 40 42 40 40 34 34 37 30 ` '
                  b'54( 51, 27)-31-31 49 49 54 51 45 39 40 34..')
        # decode the bytes chunk-wise into a str before parsing
        head = ''
        for c in io.BytesIO(header):
            head += str(c.decode())
        radolan.parse_dx_header(head)

    def test_unpack_dx(self):
        # placeholder: no standalone test for the unpacking helper yet
        pass

    def test_read_dx(self):
        """A gzipped sample DX file can be read end-to-end."""
        filename = 'dx/raa00-dx_10908-0806021655-fbg---bin.gz'
        dxfile = wrl.util.get_wradlib_data_file(filename)
        data, attrs = radolan.read_dx(dxfile)
class MiscTest(unittest.TestCase):
    """Tests for miscellaneous I/O helpers (polygon text, pickle,
    radiosonde retrieval, membership functions)."""

    def test_write_polygon_to_text(self):
        """Two polygons serialize to the expected text layout."""
        poly1 = [[0., 0., 0., 0.], [0., 1., 0., 1.], [1., 1., 0., 2.],
                 [0., 0., 0., 0.]]
        poly2 = [[0., 0., 0., 0.], [0., 1., 0., 1.], [1., 1., 0., 2.],
                 [0., 0., 0., 0.]]
        polygons = [poly1, poly2]
        # expected file content, line by line
        res = ['Polygon\n', '0 0\n', '0 0.000000 0.000000 0.000000 0.000000\n',
               '1 0.000000 1.000000 0.000000 1.000000\n',
               '2 1.000000 1.000000 0.000000 2.000000\n',
               '3 0.000000 0.000000 0.000000 0.000000\n', '1 0\n',
               '0 0.000000 0.000000 0.000000 0.000000\n',
               '1 0.000000 1.000000 0.000000 1.000000\n',
               '2 1.000000 1.000000 0.000000 2.000000\n',
               '3 0.000000 0.000000 0.000000 0.000000\n', 'END\n']
        tmp = tempfile.NamedTemporaryFile()
        wrl.io.write_polygon_to_text(tmp.name, polygons)
        self.assertEqual(open(tmp.name, 'r').readlines(), res)

    def test_pickle(self):
        """An array round-trips through to_pickle/from_pickle."""
        arr = np.zeros((124, 248), dtype=np.int16)
        tmp = tempfile.NamedTemporaryFile()
        wrl.io.to_pickle(tmp.name, arr)
        res = wrl.io.from_pickle(tmp.name)
        self.assertTrue(np.allclose(arr, res))

    @unittest.skipIf(sys.version_info < (3, 0),
                     "not supported in this python version")
    def test_get_radiosonde(self):
        """Radiosonde data download matches known reference values.

        Network-dependent: skipped gracefully on HTTP errors.
        """
        date = datetime.datetime(2013, 7, 1, 15, 30)
        # first data row of the expected sounding
        res1 = np.array([(1000., 153., 17.4, 13.5, 13.5, 78., 78., 9.81, 200.,
                          6., 290.6, 318.5, 292.3)],
                        dtype=[('PRES', '<f8'), ('HGHT', '<f8'),
                               ('TEMP', '<f8'), ('DWPT', '<f8'),
                               ('FRPT', '<f8'), ('RELH', '<f8'),
                               ('RELI', '<f8'), ('MIXR', '<f8'),
                               ('DRCT', '<f8'), ('SKNT', '<f8'),
                               ('THTA', '<f8'), ('THTE', '<f8'),
                               ('THTV', '<f8')])
        # expected station metadata / derived indices
        res2 = {'Station identifier': 'EDZE',
                'Station number': 10410,
                'Observation time': datetime.datetime(2013, 7, 1, 12, 0),
                'Station latitude': 51.4,
                'Station longitude': 6.96,
                'Station elevation': 153.0,
                'Showalter index': 6.1,
                'Lifted index': 0.58,
                'LIFT computed using virtual temperature': 0.52,
                'SWEAT index': 77.7,
                'K index': 11.7,
                'Cross totals index': 13.7,
                'Vertical totals index': 28.7,
                'Totals totals index': 42.4,
                'Convective Available Potential Energy': 6.9,
                'CAPE using virtual temperature': 17.78,
                'Convective Inhibition': 0.0,
                'CINS using virtual temperature': 0.0,
                'Equilibrum Level': 597.86,
                'Equilibrum Level using virtual temperature': 589.7,
                'Level of Free Convection': 931.41,
                'LFCT using virtual temperature': 934.07,
                'Bulk Richardson Number': 0.24,
                'Bulk Richardson Number using CAPV': 0.62,
                'Temp [K] of the Lifted Condensation Level': 284.16,
                'Pres [hPa] of the Lifted Condensation Level': 934.07,
                'Mean mixed layer potential temperature': 289.76,
                'Mean mixed layer mixing ratio': 8.92,
                '1000 hPa to 500 hPa thickness': 5537.0,
                'Precipitable water [mm] for entire sounding': 19.02}
        # expected physical units per quantity
        res3 = {'PRES': 'hPa', 'HGHT': 'm', 'TEMP': 'C', 'DWPT': 'C',
                'FRPT': 'C', 'RELH': '%', 'RELI': '%', 'MIXR': 'g/kg',
                'DRCT': 'deg', 'SKNT': 'knot', 'THTA': 'K', 'THTE': 'K',
                'THTV': 'K'}
        import urllib
        try:
            # an unknown station id must raise
            with self.assertRaises(ValueError):
                data, meta = wrl.io.get_radiosonde(10411, date)
            data, meta = wrl.io.get_radiosonde(10410, date)
        except urllib.error.HTTPError:
            print("HTTPError while retrieving radiosonde data, test skipped!")
        else:
            self.assertEqual(data[0], res1[0])
            quant = meta.pop('quantity')
            self.assertEqual(meta, res2)
            self.assertEqual(quant, res3)

    def test_get_membership_functions(self):
        """Membership functions load with the expected shape and values."""
        filename = wrl.util.get_wradlib_data_file('misc/msf_xband.gz')
        msf = wrl.io.get_membership_functions(filename)
        # reference slice of the loaded array
        res = np.array(
            [[6.000e+00, 5.000e+00, 1.000e+01, 3.500e+01, 4.000e+01],
             [6.000e+00, -7.458e-01, -4.457e-01, 5.523e-01, 8.523e-01],
             [6.000e+00, 7.489e-01, 7.689e-01, 9.236e-01, 9.436e-01],
             [6.000e+00, -5.037e-01, -1.491e-01, -1.876e-01, 1.673e-01],
             [6.000e+00, -5.000e+00, 0.000e+00, 4.000e+01, 2.000e+03]])
        self.assertEqual(msf.shape, (11, 5, 55, 5))
        np.testing.assert_array_equal(msf[0, :, 8, :], res)
class HDF5Test(unittest.TestCase):
    """Tests for the HDF5-based readers (generic, OPERA, GAMIC, GPM, TRMM,
    SAFNWC)."""

    def test_to_hdf5(self):
        """Array + metadata round-trip through to_hdf5/from_hdf5."""
        arr = np.zeros((124, 248), dtype=np.int16)
        metadata = {'test': 12.}
        tmp = tempfile.NamedTemporaryFile()
        wrl.io.to_hdf5(tmp.name, arr, metadata=metadata)
        res, resmeta = wrl.io.from_hdf5(tmp.name)
        self.assertTrue(np.allclose(arr, res))
        self.assertDictEqual(metadata, resmeta)
        # requesting a non-existing dataset must raise
        with self.assertRaises(KeyError):
            wrl.io.from_hdf5(tmp.name, 'NotAvailable')

    def test_read_safnwc(self):
        """SAFNWC files read; a file stripped to only 'CT' must raise.

        NOTE(review): relies on the external ``h5copy`` tool and shell
        ``rm`` to build the stripped copy.
        """
        filename = 'hdf5/SAFNWC_MSG3_CT___201304290415_BEL_________.h5'
        safnwcfile = wrl.util.get_wradlib_data_file(filename)
        wrl.io.read_safnwc(safnwcfile)
        command = 'rm -rf test1.h5'
        check_call(command, shell=True)
        command = 'h5copy -i {} -o test1.h5 -s CT -d CT'.format(safnwcfile)
        check_call(command, shell=True)
        with self.assertRaises(KeyError):
            wrl.io.read_safnwc('test1.h5')

    def test_read_gpm(self):
        """GPM data is read within a bounding box derived from a GR volume."""
        filename1 = ('gpm/2A-CS-151E24S154E30S.GPM.Ku.V7-20170308.20141206-'
                     'S095002-E095137.004383.V05A.HDF5')
        gpm_file = wrl.util.get_wradlib_data_file(filename1)
        filename2 = ('hdf5/IDR66_20141206_094829.vol.h5')
        gr2gpm_file = wrl.util.get_wradlib_data_file(filename2)
        gr_data = wrl.io.read_generic_netcdf(gr2gpm_file)
        # pull sweep geometry out of dataset 2 of the ground-radar volume
        dset = gr_data['dataset{0}'.format(2)]
        nray_gr = dset['where']['nrays']
        ngate_gr = dset['where']['nbins'].astype("i4")
        elev_gr = dset['where']['elangle']
        dr_gr = dset['where']['rscale']
        lon0_gr = gr_data['where']['lon']
        lat0_gr = gr_data['where']['lat']
        alt0_gr = gr_data['where']['height']
        # project the sweep centroids to lon/lat to get the bounding box
        coord = wrl.georef.sweep_centroids(nray_gr, dr_gr, ngate_gr, elev_gr)
        coords = wrl.georef.spherical_to_proj(coord[..., 0],
                                              np.degrees(coord[..., 1]),
                                              coord[..., 2],
                                              (lon0_gr, lat0_gr, alt0_gr))
        lon = coords[..., 0]
        lat = coords[..., 1]
        bbox = wrl.zonalstats.get_bbox(lon, lat)
        wrl.io.read_gpm(gpm_file, bbox)

    def test_read_trmm(self):
        """TRMM 2A23/2A25 data is read within a GR-derived bounding box."""
        # define TRMM data sets
        trmm_2a23_file = wrl.util.get_wradlib_data_file(
            'trmm/2A-CS-151E24S154E30S.TRMM.PR.2A23.20100206-'
            'S111425-E111526.069662.7.HDF')
        trmm_2a25_file = wrl.util.get_wradlib_data_file(
            'trmm/2A-CS-151E24S154E30S.TRMM.PR.2A25.20100206-'
            'S111425-E111526.069662.7.HDF')
        filename2 = ('hdf5/IDR66_20141206_094829.vol.h5')
        gr2gpm_file = wrl.util.get_wradlib_data_file(filename2)
        gr_data = wrl.io.read_generic_netcdf(gr2gpm_file)
        # same bounding-box derivation as in test_read_gpm
        dset = gr_data['dataset{0}'.format(2)]
        nray_gr = dset['where']['nrays']
        ngate_gr = dset['where']['nbins'].astype("i4")
        elev_gr = dset['where']['elangle']
        dr_gr = dset['where']['rscale']
        lon0_gr = gr_data['where']['lon']
        lat0_gr = gr_data['where']['lat']
        alt0_gr = gr_data['where']['height']
        coord = wrl.georef.sweep_centroids(nray_gr, dr_gr, ngate_gr, elev_gr)
        coords = wrl.georef.spherical_to_proj(coord[..., 0],
                                              np.degrees(coord[..., 1]),
                                              coord[..., 2],
                                              (lon0_gr, lat0_gr, alt0_gr))
        lon = coords[..., 0]
        lat = coords[..., 1]
        bbox = wrl.zonalstats.get_bbox(lon, lat)
        wrl.io.read_trmm(trmm_2a23_file, trmm_2a25_file, bbox)

    def test_read_generic_hdf5(self):
        """The generic HDF5 reader handles an ODIM volume file."""
        filename = ('hdf5/IDR66_20141206_094829.vol.h5')
        h5_file = wrl.util.get_wradlib_data_file(filename)
        wrl.io.read_generic_hdf5(h5_file)

    def test_read_opera_hdf5(self):
        """The OPERA reader handles an ODIM volume file."""
        filename = ('hdf5/IDR66_20141206_094829.vol.h5')
        h5_file = wrl.util.get_wradlib_data_file(filename)
        wrl.io.read_opera_hdf5(h5_file)

    def test_read_gamic_hdf5(self):
        """GAMIC PPI and RHI volumes read; a GPM file must raise KeyError."""
        ppi = ('hdf5/2014-08-10--182000.ppi.mvol')
        rhi = ('hdf5/2014-06-09--185000.rhi.mvol')
        filename = ('gpm/2A-CS-151E24S154E30S.GPM.Ku.V7-20170308.20141206-'
                    'S095002-E095137.004383.V05A.HDF5')
        h5_file = wrl.util.get_wradlib_data_file(ppi)
        wrl.io.read_gamic_hdf5(h5_file)
        h5_file = wrl.util.get_wradlib_data_file(rhi)
        wrl.io.read_gamic_hdf5(h5_file)
        h5_file = wrl.util.get_wradlib_data_file(filename)
        with self.assertRaises(KeyError):
            wrl.io.read_gamic_hdf5(h5_file)
class RadolanTest(unittest.TestCase):
def test_get_radolan_header_token(self):
    """Every known RADOLAN header token starts out as ``None``."""
    expected_keys = ['BY', 'VS', 'SW', 'PR', 'INT', 'GP',
                     'MS', 'LV', 'CS', 'MX', 'BG', 'ST',
                     'VV', 'MF', 'QN', 'VR', 'U']
    token = radolan.get_radolan_header_token()
    for key in expected_keys:
        self.assertIsNone(token[key])
def test_get_radolan_header_token_pos(self):
    """Token positions are located correctly in RW and RQ headers."""
    # RW product header: only a subset of tokens is present
    header = ('RW030950100000814BY1620130VS 3SW   2.13.1PR E-01'
              'INT  60GP 900x 900MS 58<boo,ros,emd,hnr,pro,ess,'
              'asd,neu,nhb,oft,tur,isn,fbg,mem>')
    test_head = radolan.get_radolan_header_token()
    # expected (start, stop) index pairs for each present token
    test_head['PR'] = (43, 48)
    test_head['GP'] = (57, 66)
    test_head['INT'] = (51, 55)
    test_head['SW'] = (32, 41)
    test_head['VS'] = (28, 30)
    test_head['MS'] = (68, 128)
    test_head['BY'] = (19, 26)
    head = radolan.get_radolan_header_token_pos(header)
    self.assertDictEqual(head, test_head)

    # RQ product header: includes the VV/MF/QN tokens as well
    header = ('RQ210945100000517BY1620162VS 2SW 1.7.2PR E-01'
              'INT 60GP 900x 900VV 0MF 00000002QN 001'
              'MS 67<bln,drs,eis,emd,ess,fbg,fld,fra,ham,han,muc,'
              'neu,nhb,ros,tur,umd>')
    test_head = {'BY': (19, 26), 'VS': (28, 30), 'SW': (32, 38),
                 'PR': (40, 45), 'INT': (48, 51), 'GP': (53, 62),
                 'MS': (85, 153), 'LV': None, 'CS': None, 'MX': None,
                 'BG': None, 'ST': None, 'VV': (64, 66), 'MF': (68, 77),
                 'QN': (79, 83), 'VR': None, 'U': None}
    head = radolan.get_radolan_header_token_pos(header)
    self.assertDictEqual(head, test_head)
def test_decode_radolan_runlength_line(self):
    """A run-length encoded line decodes to the expected 460 values."""
    # expected decoded line: a run of zeros, a run of nines, zeros again
    testarr = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
               9., 9., 9.,
               9., 9., 9., 9., 9., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0.,
               0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
    # run-length encoded input bytes for the line above
    testline = (b'\x10\x98\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9'
                b'\xf9\xf9\xf9\xf9\xf9\xf9\xd9\n')
    # a minimal encoded line that decodes to all zeros
    testline1 = (b'\x10\n')
    testattrs = {'ncol': 460, 'nodataflag': 0}
    arr = np.frombuffer(testline, np.uint8).astype(np.uint8)
    line = radolan.decode_radolan_runlength_line(arr, testattrs)
    self.assertTrue(np.allclose(line, testarr))
    arr = np.frombuffer(testline1, np.uint8).astype(np.uint8)
    line = radolan.decode_radolan_runlength_line(arr, testattrs)
    self.assertTrue(np.allclose(line, [0] * 460))
def test_read_radolan_runlength_line(self):
    """A run-length encoded line round-trips through a temporary file."""
    testline = (b'\x10\x98\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9'
                b'\xf9\xf9\xf9\xf9\xf9\xf9\xd9\n')
    testarr = np.frombuffer(testline, np.uint8).astype(np.uint8)
    # FIX: use context managers and try/finally so the temporary file
    # and its descriptor are released even when the read raises
    # (the original leaked both on failure)
    fid, temp_path = tempfile.mkstemp()
    try:
        # the raw descriptor is not needed; we reopen by path below
        os.close(fid)
        with open(temp_path, 'wb') as tmp_id:
            tmp_id.write(testline)
        with open(temp_path, 'rb') as tmp_id:
            line = radolan.read_radolan_runlength_line(tmp_id)
    finally:
        os.remove(temp_path)
    self.assertTrue(np.allclose(line, testarr))
def test_decode_radolan_runlength_array(self):
    """A full PG composite decodes to the expected 460x460 grid."""
    filename = 'radolan/misc/raa00-pc_10015-1408030905-dwd---bin.gz'
    pg_file = wrl.util.get_wradlib_data_file(filename)
    pg_fid = radolan.get_radolan_filehandle(pg_file)
    # header is parsed first to learn the payload size
    header = radolan.read_radolan_header(pg_fid)
    attrs = radolan.parse_dwd_composite_header(header)
    data = radolan.read_radolan_binary_array(pg_fid, attrs['datasize'])
    attrs['nodataflag'] = 255
    arr = radolan.decode_radolan_runlength_array(data, attrs)
    self.assertEqual(arr.shape, (460, 460))
def test_read_radolan_binary_array(self):
    """The binary payload has the advertised size; over-reading raises."""
    filename = 'radolan/misc/raa01-rw_10000-1408030950-dwd---bin.gz'
    rw_file = wrl.util.get_wradlib_data_file(filename)
    rw_fid = radolan.get_radolan_filehandle(rw_file)
    header = radolan.read_radolan_header(rw_fid)
    attrs = radolan.parse_dwd_composite_header(header)
    data = radolan.read_radolan_binary_array(rw_fid, attrs['datasize'])
    self.assertEqual(len(data), attrs['datasize'])

    # asking for more bytes than the file holds must raise IOError
    rw_fid = radolan.get_radolan_filehandle(rw_file)
    header = radolan.read_radolan_header(rw_fid)
    attrs = radolan.parse_dwd_composite_header(header)
    with self.assertRaises(IOError):
        radolan.read_radolan_binary_array(rw_fid, attrs['datasize'] + 10)
def test_get_radolan_filehandle(self):
    """File handles open for both gzipped and plain RADOLAN files.

    NOTE(review): relies on an external ``gunzip`` being on PATH to
    produce the uncompressed variant.
    """
    filename = 'radolan/misc/raa01-rw_10000-1408030950-dwd---bin.gz'
    rw_file = wrl.util.get_wradlib_data_file(filename)
    rw_fid = radolan.get_radolan_filehandle(rw_file)
    self.assertEqual(rw_file, rw_fid.name)

    # decompress (keeping the original) and open the plain file too
    command = 'gunzip -k -f {}'.format(rw_file)
    check_call(command, shell=True)
    rw_fid = radolan.get_radolan_filehandle(rw_file[:-3])
    self.assertEqual(rw_file[:-3], rw_fid.name)
def test_read_radolan_header(self):
    """Header reading stops at ETX; a missing ETX raises EOFError."""
    rx_header = (b'RW030950100000814BY1620130VS 3SW   2.13.1PR E-01'
                 b'INT  60GP 900x 900MS 58<boo,ros,emd,hnr,pro,ess,'
                 b'asd,neu,nhb,oft,tur,isn,fbg,mem>')
    # without the trailing ETX (\x03) byte the reader must fail
    with self.assertRaises(EOFError):
        radolan.read_radolan_header(io.BytesIO(rx_header))
    # with ETX appended, the decoded header matches the input
    result = radolan.read_radolan_header(io.BytesIO(rx_header + b"\x03"))
    self.assertEqual(result, rx_header.decode())
def test_parse_dwd_composite_header(self):
rx_header = ('RW030950100000814BY1620130VS 3SW 2.13.1PR E-01INT 60'
'GP 900x 900MS 58<boo,ros,emd,hnr,pro,ess,asd,neu,nhb,'
'oft,tur,isn,fbg,mem>')
test_rx = {'maxrange': '150 km',
'radarlocations': ['boo', 'ros', 'emd', 'hnr', 'pro',
'ess', 'asd', 'neu', 'nhb', 'oft',
'tur', 'isn', 'fbg', 'mem'],
'nrow': 900, 'intervalseconds': 3600, 'precision': 0.1,
'datetime': datetime.datetime(2014, 8, 3, 9, 50),
'ncol': 900,
'radolanversion': '2.13.1', 'producttype': 'RW',
'radarid': '10000',
'datasize': 1620001, }
pg_header = ('PG030905100000814BY20042LV 6 1.0 19.0 28.0 37.0 46.0 '
'55.0CS0MX 0MS 82<boo,ros,emd,hnr,pro,ess,asd,neu,nhb,'
'oft,tur,isn,fbg,mem,czbrd> are used, BG460460')
test_pg = {
'radarlocations': ['boo', 'ros', 'emd', 'hnr', 'pro', 'ess', 'asd',
'neu',
'nhb', 'oft', 'tur', 'isn', 'fbg', 'mem',
'czbrd'],
'nrow': 460, 'level': [1., 19., 28., 37., 46., 55.],
'datetime': datetime.datetime(2014, 8, 3, 9, 5), 'ncol': 460,
'producttype': 'PG', 'radarid': '10000', 'nlevel': 6,
'indicator': 'near ground level', 'imagecount': 0,
'datasize': 19889}
rq_header = ('RQ210945100000517BY1620162VS 2SW 1.7.2PR E-01'
'INT 60GP 900x 900VV 0MF 00000002QN 001'
'MS 67<bln,drs,eis,emd,ess,fbg,fld,fra,ham,han,muc,'
'neu,nhb,ros,tur,umd>')
test_rq = {'producttype': 'RQ',
'datetime': datetime.datetime(2017, 5, 21, 9, 45),
'radarid': '10000', 'datasize': 1620008,
'maxrange': '128 km', 'radolanversion': '1.7.2',
'precision': 0.1, 'intervalseconds': 3600,
'nrow': 900, 'ncol': 900,
'radarlocations': ['bln', 'drs', 'eis', 'emd', 'ess',
'fbg', 'fld', 'fra', 'ham', 'han',
'muc', 'neu', 'nhb', 'ros', 'tur',
'umd'],
'predictiontime': 0, 'moduleflag': 2,
'quantification': 1}
sq_header = ('SQ102050100000814BY1620231VS 3SW 2.13.1PR E-01'
'INT 360GP 900x 900MS 62<boo,ros,emd,hnr,umd,pro,ess,'
'asd,neu,nhb,oft,tur,isn,fbg,mem> ST 92<asd 6,boo 6,'
'emd 6,ess 6,fbg 6,hnr 6,isn 6,mem 6,neu 6,nhb 6,oft 6,'
'pro 6,ros 6,tur 6,umd 6>')
test_sq = {'producttype': 'SQ',
'datetime': datetime.datetime(2014, 8, 10, 20, 50),
'radarid': '10000', 'datasize': 1620001,
'maxrange': '150 km', 'radolanversion': '2.13.1',
'precision': 0.1, 'intervalseconds': 21600, 'nrow': 900,
'ncol': 900,
'radarlocations': ['boo', 'ros', 'emd', 'hnr', 'umd', 'pro',
'ess', 'asd', 'neu', 'nhb', 'oft', 'tur',
'isn', 'fbg', 'mem'],
'radardays': ['asd 6', 'boo 6', 'emd 6', 'ess 6', 'fbg 6',
'hnr 6', 'isn 6', 'mem 6', 'neu 6', 'nhb 6',
'oft 6', 'pro 6', 'ros 6', 'tur 6', 'umd 6']}
yw_header = ('YW070235100001014BY1980156VS 3SW 2.18.3PR E-02'
'INT 5U0GP1100x 900MF 00000000VR2017.002'
'MS 61<boo,ros,emd,hnr,umd,pro,ess,asd,neu,'
'nhb,oft,tur,isn,fbg,mem>')
test_yw = {'producttype': 'YW',
'datetime': datetime.datetime(2014, 10, 7, 2, 35),
'radarid': '10000', 'datasize': 1980000,
'maxrange': '150 km', 'radolanversion': '2.18.3',
'precision': 0.01, 'intervalseconds': 300,
'intervalunit': 0, 'nrow': 1100, 'ncol': 900,
'moduleflag': 0, 'reanalysisversion': '2017.002',
'radarlocations': ['boo', 'ros', 'emd', 'hnr', 'umd', 'pro',
'ess', 'asd', 'neu', 'nhb', 'oft', 'tur',
'isn', 'fbg', 'mem']}
rx = radolan.parse_dwd_composite_header(rx_header)
pg = radolan.parse_dwd_composite_header(pg_header)
rq = radolan.parse_dwd_composite_header(rq_header)
sq = radolan.parse_dwd_composite_header(sq_header)
yw = radolan.parse_dwd_composite_header(yw_header)
for key, value in rx.items():
self.assertEqual(value, test_rx[key])
for key, value in pg.items():
if type(value) == np.ndarray:
self.assertTrue(np.allclose(value, test_pg[key]))
else:
self.assertEqual(value, test_pg[key])
for key, value in rq.items():
if type(value) == np.ndarray:
self.assertTrue(np.allclose(value, test_rq[key]))
else:
self.assertEqual(value, test_rq[key])
for key, value in sq.items():
if type(value) == np.ndarray:
self.assertTrue(np.allclose(value, test_sq[key]))
else:
self.assertEqual(value, test_sq[key])
for key, value in yw.items():
if type(value) == np.ndarray:
self.assertTrue(np.allclose(value, test_yw[key]))
else:
self.assertEqual(value, test_yw[key])
    def test_read_radolan_composite(self):
        """Read an RW composite from path and file handle, plus RX/PC."""
        filename = 'radolan/misc/raa01-rw_10000-1408030950-dwd---bin.gz'
        rw_file = wrl.util.get_wradlib_data_file(filename)
        test_attrs = {'maxrange': '150 km',
                      'radarlocations': ['boo', 'ros', 'emd', 'hnr', 'pro',
                                         'ess', 'asd', 'neu', 'nhb', 'oft',
                                         'tur', 'isn', 'fbg', 'mem'],
                      'nrow': 900, 'intervalseconds': 3600,
                      'precision': 0.1,
                      'datetime': datetime.datetime(2014, 8, 3, 9, 50),
                      'ncol': 900, 'radolanversion': '2.13.1',
                      'producttype': 'RW', 'nodataflag': -9999,
                      'datasize': 1620000, 'radarid': '10000'}
        # test for complete file
        data, attrs = radolan.read_radolan_composite(rw_file)
        self.assertEqual(data.shape, (900, 900))
        for key, value in attrs.items():
            if type(value) == np.ndarray:
                self.assertIn(value.dtype, [np.int32, np.int64])
            else:
                self.assertEqual(value, test_attrs[key])
        # Do the same for the case where a file handle is passed
        # instead of a file name
        with gzip.open(rw_file) as fh:
            data, attrs = radolan.read_radolan_composite(fh)
        self.assertEqual(data.shape, (900, 900))
        for key, value in attrs.items():
            if type(value) == np.ndarray:
                self.assertIn(value.dtype, [np.int32, np.int64])
            else:
                self.assertEqual(value, test_attrs[key])
        # test for loaddata=False
        data, attrs = radolan.read_radolan_composite(rw_file, loaddata=False)
        self.assertEqual(data, None)
        for key, value in attrs.items():
            if type(value) == np.ndarray:
                self.assertEqual(value.dtype, np.int64)
            else:
                self.assertEqual(value, test_attrs[key])
        # with loaddata=False no nodata handling happens, so the key is absent
        with self.assertRaises(KeyError):
            attrs['nodataflag']
        filename = 'radolan/misc/raa01-rx_10000-1408102050-dwd---bin.gz'
        rx_file = wrl.util.get_wradlib_data_file(filename)
        # smoke test: an RX product loads without error
        data, attrs = radolan.read_radolan_composite(rx_file)
        filename = 'radolan/misc/raa00-pc_10015-1408030905-dwd---bin.gz'
        pc_file = wrl.util.get_wradlib_data_file(filename)
        # smoke test: a PC product loads without error
        data, attrs = radolan.read_radolan_composite(pc_file)
class RainbowTest(unittest.TestCase):
    """Tests for the Rainbow5 file reader (``wradlib.io.rainbow``)."""
    def test_read_rainbow(self):
        # a bogus path must be rejected; a valid file yields the parsed
        # XML volume header whether passed as path or open handle
        filename = 'rainbow/2013070308340000dBuZ.azi'
        rb_file = wrl.util.get_wradlib_data_file(filename)
        with self.assertRaises(IOError):
            rainbow.read_rainbow('test')
        # Test reading from file name
        rb_dict = rainbow.read_rainbow(rb_file)
        self.assertEqual(rb_dict[u'volume'][u'@datetime'],
                         u'2013-07-03T08:33:55')
        # Test reading from file handle
        with open(rb_file, 'rb') as rb_fh:
            rb_dict = rainbow.read_rainbow(rb_fh)
            self.assertEqual(rb_dict[u'volume'][u'@datetime'],
                             u'2013-07-03T08:33:55')
    def test_find_key(self):
        # find_key walks nested dicts/lists and yields every sub-dict
        # that contains the requested key (order not guaranteed)
        indict = {'A': {'AA': {'AAA': 0, 'X': 1},
                        'AB': {'ABA': 2, 'X': 3},
                        'AC': {'ACA': 4, 'X': 5},
                        'AD': [{'ADA': 4, 'X': 2}]}}
        outdict = [{'X': 1, 'AAA': 0}, {'X': 5, 'ACA': 4},
                   {'ABA': 2, 'X': 3}, {'ADA': 4, 'X': 2}]
        # assertCountEqual is the Python 3 name, assertItemsEqual Python 2
        try:
            self.assertCountEqual(list(rainbow.find_key('X', indict)),
                                  outdict)
            self.assertCountEqual(list(rainbow.find_key('Y', indict)),
                                  [])
        except AttributeError:
            self.assertItemsEqual(list(rainbow.find_key('X', indict)),
                                  outdict)
            self.assertItemsEqual(list(rainbow.find_key('Y', indict)),
                                  [])
    def test_decompress(self):
        # decompress must invert zlib.compress
        dstring = b'very special compressed string'
        cstring = zlib.compress(dstring)
        self.assertEqual(rainbow.decompress(cstring), dstring)
    def test_get_rb_data_layout(self):
        # returns (byte width, numpy dtype string) per bit depth;
        # unsupported depths raise
        self.assertEqual(rainbow.get_rb_data_layout(8), (1, '>u1'))
        self.assertEqual(rainbow.get_rb_data_layout(16), (2, '>u2'))
        self.assertEqual(rainbow.get_rb_data_layout(32), (4, '>u4'))
        with self.assertRaises(ValueError):
            rainbow.get_rb_data_layout(128)
    @unittest.skipIf(sys.version_info < (3, 3),
                     "not supported in this python version")
    def test_get_rb_data_layout_big(self):
        # on a (patched) big-endian host the dtype byte-order marker flips
        from unittest.mock import patch
        with patch('sys.byteorder', 'big'):
            self.assertEqual(rainbow.get_rb_data_layout(8), (1, '<u1'))
            self.assertEqual(rainbow.get_rb_data_layout(16), (2, '<u2'))
            self.assertEqual(rainbow.get_rb_data_layout(32), (4, '<u4'))
    def test_get_rb_data_attribute(self):
        # attributes are read from xmltodict '@'-prefixed keys and
        # converted to int; missing attributes raise KeyError
        xmltodict = wrl.util.import_optional('xmltodict')
        data = xmltodict.parse(('<slicedata time="13:30:05" date="2013-04-26">'
                                '#<rayinfo refid="startangle" blobid="0" '
                                'rays="361" depth="16"/> '
                                '#<rawdata blobid="1" rays="361" type="dBuZ" '
                                'bins="400" min="-31.5" max="95.5" '
                                'depth="8"/> #</slicedata>'))
        data = list(rainbow.find_key('@blobid', data))
        self.assertEqual(rainbow.get_rb_data_attribute(data[0], 'blobid'), 0)
        self.assertEqual(rainbow.get_rb_data_attribute(data[1], 'blobid'), 1)
        self.assertEqual(rainbow.get_rb_data_attribute(data[0], 'rays'), 361)
        self.assertEqual(rainbow.get_rb_data_attribute(data[1], 'rays'), 361)
        self.assertEqual(rainbow.get_rb_data_attribute(data[1], 'bins'), 400)
        with self.assertRaises(KeyError):
            rainbow.get_rb_data_attribute(data[0], 'Nonsense')
        self.assertEqual(rainbow.get_rb_data_attribute(data[0], 'depth'), 16)
    def test_get_rb_blob_attribute(self):
        # blob attributes are returned as strings, unconverted
        xmltodict = wrl.util.import_optional('xmltodict')
        xmldict = xmltodict.parse(
            '<BLOB blobid="0" size="737" compression="qt"></BLOB>')
        self.assertEqual(rainbow.get_rb_blob_attribute(xmldict, 'compression'),
                         'qt')
        self.assertEqual(rainbow.get_rb_blob_attribute(xmldict, 'size'), '737')
        self.assertEqual(rainbow.get_rb_blob_attribute(xmldict, 'blobid'), '0')
        with self.assertRaises(KeyError):
            rainbow.get_rb_blob_attribute(xmldict, 'Nonsense')
    def test_get_rb_data_shape(self):
        # shape derivation: rays only -> scalar; rays+bins -> 2D;
        # rows+columns+depth<8 -> 3D with depth axis; rows+columns -> 2D;
        # neither rays nor rows -> KeyError
        xmltodict = wrl.util.import_optional('xmltodict')
        data = xmltodict.parse(('<slicedata time="13:30:05" date="2013-04-26">'
                                '#<rayinfo refid="startangle" blobid="0" '
                                'rays="361" depth="16"/> #<rawdata blobid="1" '
                                'rays="361" type="dBuZ" bins="400" '
                                'min="-31.5" max="95.5" depth="8"/> #<flagmap '
                                'blobid="2" rows="800" type="dBuZ" '
                                'columns="400" min="-31.5" max="95.5" '
                                'depth="6"/> #<defect blobid="3" type="dBuZ" '
                                'columns="400" min="-31.5" max="95.5" '
                                'depth="6"/> #<rawdata2 '
                                'blobid="4" rows="800" type="dBuZ" '
                                'columns="400" min="-31.5" max="95.5" '
                                'depth="8"/> #</slicedata>'))
        data = list(rainbow.find_key('@blobid', data))
        self.assertEqual(rainbow.get_rb_data_shape(data[0]), 361)
        self.assertEqual(rainbow.get_rb_data_shape(data[1]), (361, 400))
        self.assertEqual(rainbow.get_rb_data_shape(data[2]), (800, 400, 6))
        self.assertEqual(rainbow.get_rb_data_shape(data[4]), (800, 400))
        with self.assertRaises(KeyError):
            rainbow.get_rb_data_shape(data[3])
    def test_map_rb_data(self):
        # raw bytes are reinterpreted at the requested bit depth
        indata = b'0123456789'
        outdata8 = np.array([48, 49, 50, 51, 52, 53, 54, 55, 56, 57],
                            dtype=np.uint8)
        outdata16 = np.array([12337, 12851, 13365, 13879, 14393],
                             dtype=np.uint16)
        outdata32 = np.array([808530483, 875902519], dtype=np.uint32)
        self.assertTrue(np.allclose(rainbow.map_rb_data(indata, 8), outdata8))
        self.assertTrue(np.allclose(rainbow.map_rb_data(indata, 16),
                                    outdata16))
        self.assertTrue(np.allclose(rainbow.map_rb_data(indata, 32),
                                    outdata32))
        # depth 1 unpacks single bits (b'1' == 0x31 == 00110001)
        flagdata = b'1'
        self.assertTrue(np.allclose(rainbow.map_rb_data(flagdata, 1),
                                    [0, 0, 1, 1, 0, 0, 0, 1]))
    def test_get_rb_blob_data(self):
        # requesting a blobid that is not in the string must raise
        datastring = b'<BLOB blobid="0" size="737" compression="qt"></BLOB>'
        with self.assertRaises(EOFError):
            rainbow.get_rb_blob_data(datastring, 1)
    def test_get_rb_blob_from_file(self):
        filename = 'rainbow/2013070308340000dBuZ.azi'
        rb_file = wrl.util.get_wradlib_data_file(filename)
        rbdict = rainbow.read_rainbow(rb_file, loaddata=False)
        rbblob = rbdict['volume']['scan']['slice']['slicedata']['rawdata']
        # Check reading from file handle
        with self.assertRaises(IOError):
            rainbow.get_rb_blob_from_file('rb_fh', rbblob)
        with open(rb_file, 'rb') as rb_fh:
            data = rainbow.get_rb_blob_from_file(rb_fh, rbblob)
            self.assertEqual(data.shape[0], int(rbblob['@rays']))
            self.assertEqual(data.shape[1], int(rbblob['@bins']))
            with self.assertRaises(IOError):
                rainbow.get_rb_blob_from_file('rb_fh', rbblob)
        # Check reading from file path
        data = rainbow.get_rb_blob_from_file(rb_file, rbblob)
        self.assertEqual(data.shape[0], int(rbblob['@rays']))
        self.assertEqual(data.shape[1], int(rbblob['@bins']))
        with self.assertRaises(IOError):
            rainbow.get_rb_blob_from_file('rb_fh', rbblob)
    def test_get_rb_file_as_string(self):
        filename = 'rainbow/2013070308340000dBuZ.azi'
        rb_file = wrl.util.get_wradlib_data_file(filename)
        with open(rb_file, 'rb') as rb_fh:
            rb_string = rainbow.get_rb_file_as_string(rb_fh)
            self.assertTrue(rb_string)
            # a plain string instead of a handle must be rejected
            with self.assertRaises(IOError):
                rainbow.get_rb_file_as_string('rb_fh')
    def test_get_rb_header(self):
        # a truncated header (missing end of XML declaration) must raise
        rb_header = (b'<volume version="5.34.16" '
                     b'datetime="2013-07-03T08:33:55"'
                     b' type="azi" owner="RainAnalyzer"> '
                     b'<scan name="analyzer.azi" time="08:34:00" '
                     b'date="2013-07-03">')
        buf = io.BytesIO(rb_header)
        with self.assertRaises(IOError):
            rainbow.get_rb_header(buf)
        filename = 'rainbow/2013070308340000dBuZ.azi'
        rb_file = wrl.util.get_wradlib_data_file(filename)
        with open(rb_file, 'rb') as rb_fh:
            rb_header = rainbow.get_rb_header(rb_fh)
            self.assertEqual(rb_header['volume']['@version'], '5.34.16')
class RasterTest(unittest.TestCase):
    """Tests for GDAL raster helpers in ``wradlib.io``."""
    def test_gdal_create_dataset(self):
        testfunc = wrl.io.gdal_create_dataset
        # NOTE(review): NamedTemporaryFile(...).name -- the file object is
        # garbage-collected immediately, only the path string is reused;
        # tempfile.mkstemp would be more robust, especially on Windows.
        tmp = tempfile.NamedTemporaryFile(mode='w+b').name
        # an unknown driver name must be rejected
        with self.assertRaises(TypeError):
            testfunc('AIG', tmp)
        from osgeo import gdal
        # presumably AAIGrid has no direct Create() support -- confirm
        with self.assertRaises(TypeError):
            testfunc('AAIGrid', tmp, cols=10, rows=10, bands=1,
                     gdal_type=gdal.GDT_Float32)
        testfunc('GTiff', tmp, cols=10, rows=10, bands=1,
                 gdal_type=gdal.GDT_Float32)
        # second call with remove=True exercises the overwrite path
        testfunc('GTiff', tmp, cols=10, rows=10, bands=1,
                 gdal_type=gdal.GDT_Float32, remove=True)
    def test_write_raster_dataset(self):
        filename = 'geo/bonn_new.tif'
        geofile = wrl.util.get_wradlib_data_file(filename)
        ds = wrl.io.open_raster(geofile)
        wrl.io.write_raster_dataset(geofile + 'asc', ds, 'AAIGrid')
        wrl.io.write_raster_dataset(geofile + 'asc', ds, 'AAIGrid',
                                    remove=True)
        # an unknown driver name must be rejected
        with self.assertRaises(TypeError):
            wrl.io.write_raster_dataset(geofile + 'asc1', ds, 'AIG')
    def test_open_raster(self):
        # smoke test: open a GeoTIFF with an explicit driver
        filename = 'geo/bonn_new.tif'
        geofile = wrl.util.get_wradlib_data_file(filename)
        wrl.io.open_raster(geofile, 'GTiff')
class VectorTest(unittest.TestCase):
    """Smoke test for vector-file opening via ``wradlib.io``."""

    def test_open_vector(self):
        shp_path = wrl.util.get_wradlib_data_file(
            'shapefiles/agger/agger_merge.shp')
        # both driver autodetection and an explicit driver must succeed
        wrl.io.open_vector(shp_path)
        wrl.io.open_vector(shp_path, 'ESRI Shapefile')
class IrisTest(unittest.TestCase):
    """Tests for the Sigmet/IRIS raw-file reader (``wradlib.io.iris``)."""
    def test_open_iris(self):
        filename = 'sigmet/cor-main131125105503.RAW2049'
        sigmetfile = wrl.util.get_wradlib_data_file(filename)
        data = wrl.io.iris.IrisRawFile(sigmetfile, loaddata=False)
        self.assertIsInstance(data.rh, wrl.io.iris.IrisRecord)
        self.assertIsInstance(data.fh, np.memmap)
        # with loaddata=True the whole file is consumed
        data = wrl.io.iris.IrisRawFile(sigmetfile, loaddata=True)
        self.assertEqual(data._record_number, 511)
        self.assertEqual(data.filepos, 3139584)
    def test_read_iris(self):
        filename = 'sigmet/cor-main131125105503.RAW2049'
        sigmetfile = wrl.util.get_wradlib_data_file(filename)
        data = wrl.io.read_iris(sigmetfile, loaddata=True, rawdata=True)
        data_keys = ['product_hdr', 'product_type', 'ingest_header', 'nsweeps',
                     'nrays', 'nbins', 'data_types', 'data',
                     'raw_product_bhdrs']
        product_hdr_keys = ['structure_header', 'product_configuration',
                            'product_end']
        ingest_hdr_keys = ['structure_header', 'ingest_configuration',
                           'task_configuration', 'spare_0', 'gparm',
                           'reserved']
        data_types = ['DB_DBZ', 'DB_VEL', 'DB_ZDR', 'DB_KDP', 'DB_PHIDP',
                      'DB_RHOHV', 'DB_HCLASS']
        self.assertEqual(list(data.keys()), data_keys)
        self.assertEqual(list(data['product_hdr'].keys()), product_hdr_keys)
        self.assertEqual(list(data['ingest_header'].keys()), ingest_hdr_keys)
        self.assertEqual(data['data_types'], data_types)
        # loaddata may also be a dict selecting moments and sweeps
        data_types = ['DB_DBZ', 'DB_VEL']
        selected_data = [1, 3, 8]
        loaddata = {'moment': data_types, 'sweep': selected_data}
        data = wrl.io.read_iris(sigmetfile, loaddata=loaddata, rawdata=True)
        self.assertEqual(list(data['data'][1]['sweep_data'].keys()),
                         data_types)
        self.assertEqual(list(data['data'].keys()), selected_data)
    def test_IrisRecord(self):
        filename = 'sigmet/cor-main131125105503.RAW2049'
        sigmetfile = wrl.util.get_wradlib_data_file(filename)
        data = wrl.io.IrisRecordFile(sigmetfile, loaddata=False)
        # reset record after init
        data.init_record(1)
        self.assertIsInstance(data.rh, wrl.io.iris.IrisRecord)
        self.assertEqual(data.rh.pos, 0)
        self.assertEqual(data.rh.recpos, 0)
        self.assertEqual(data.rh.recnum, 1)
        rlist = [23, 0, 4, 0, 20, 19, 0, 0, 0, 0,
                 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        # the same 20 bytes read at widths 2, 1 and 4 must agree
        np.testing.assert_array_equal(data.rh.read(10, 2), rlist)
        self.assertEqual(data.rh.pos, 20)
        self.assertEqual(data.rh.recpos, 10)
        data.rh.pos -= 20
        np.testing.assert_array_equal(data.rh.read(20, 1), rlist)
        data.rh.recpos -= 10
        np.testing.assert_array_equal(data.rh.read(5, 4), rlist)
    def test_decode_bin_angle(self):
        self.assertEqual(wrl.io.iris.decode_bin_angle(20000, 2), 109.86328125)
        self.assertEqual(wrl.io.iris.decode_bin_angle(2000000000, 4),
                         167.63806343078613)
    def test_decode_array(self):
        # BUG FIX: this method was named ``decode_array`` (no ``test_``
        # prefix) and was therefore never discovered or run by unittest.
        data = np.arange(0, 11)
        np.testing.assert_array_equal(wrl.io.iris.decode_array(data),
                                      [0., 1., 2., 3., 4., 5.,
                                       6., 7., 8., 9., 10.])
        np.testing.assert_array_equal(wrl.io.iris.decode_array(data,
                                                               offset=1.),
                                      [1., 2., 3., 4., 5., 6.,
                                       7., 8., 9., 10., 11.])
        np.testing.assert_array_equal(wrl.io.iris.decode_array(data,
                                                               scale=0.5),
                                      [0, 2., 4., 6., 8., 10.,
                                       12., 14., 16., 18., 20.])
        np.testing.assert_array_equal(wrl.io.iris.decode_array(data, offset=1.,
                                                               scale=0.5),
                                      [2., 4., 6., 8., 10., 12.,
                                       14., 16., 18., 20., 22.])
        np.testing.assert_array_equal(wrl.io.iris.decode_array(data, offset=1.,
                                                               scale=0.5,
                                                               offset2=-2.),
                                      [0, 2., 4., 6., 8., 10.,
                                       12., 14., 16., 18., 20.])
        data = np.array([0, 1, 255, 1000, 9096, 22634, 34922, 50000, 65534],
                        dtype=np.uint16)
        np.testing.assert_array_equal(wrl.io.iris.decode_array(data,
                                                               scale=1000,
                                                               tofloat=True),
                                      [0., 0.001, 0.255, 1., 10., 100.,
                                       800., 10125.312, 134184.96])
    def test_decode_kdp(self):
        np.testing.assert_array_almost_equal(
            wrl.io.iris.decode_kdp(np.arange(-5, 5, dtype='int8'),
                                   wavelength=10.),
            [12.243229, 12.880858, 13.551695,
             14.257469, 15., -0., -15., -14.257469,
             -13.551695, -12.880858])
    def test_decode_phidp(self):
        np.testing.assert_array_almost_equal(
            wrl.io.iris.decode_phidp(np.arange(0, 10, dtype='uint8'),
                                     scale=254., offset=-1),
            [-0.70866142, 0., 0.70866142, 1.41732283, 2.12598425, 2.83464567,
             3.54330709, 4.2519685, 4.96062992, 5.66929134])
    def test_decode_phidp2(self):
        np.testing.assert_array_almost_equal(
            wrl.io.iris.decode_phidp2(np.arange(0, 10, dtype='uint16'),
                                      scale=65534., offset=-1),
            [-0.00549333, 0., 0.00549333, 0.01098666, 0.01648, 0.02197333,
             0.02746666, 0.03295999, 0.03845332, 0.04394665])
    def test_decode_sqi(self):
        np.testing.assert_array_almost_equal(
            wrl.io.iris.decode_sqi(np.arange(0, 10, dtype='uint8'),
                                   scale=253., offset=-1),
            [np.nan, 0., 0.06286946, 0.08891084, 0.1088931, 0.12573892,
             0.14058039, 0.1539981, 0.16633696, 0.17782169])
    def test_decode_time(self):
        timestring = b'\xd1\x9a\x00\x000\t\xdd\x07\x0b\x00\x19\x00'
        self.assertEqual(wrl.io.iris.decode_time(timestring).isoformat(),
                         '2013-11-25T11:00:35.352000')
    def test_decode_string(self):
        # trailing NUL padding is stripped
        self.assertEqual(wrl.io.iris.decode_string(b'EEST\x00\x00\x00\x00'),
                         'EEST')
    def test__get_fmt_string(self):
        fmt = '12sHHi12s12s12s6s12s12sHiiiiiiiiii2sH12sHB1shhiihh80s16s12s48s'
        self.assertEqual(wrl.io.iris._get_fmt_string(
            wrl.io.iris.PRODUCT_CONFIGURATION), fmt)
class NetcdfTest(unittest.TestCase):
    """Smoke tests for the NetCDF readers in ``wradlib.io``."""

    def test_read_edge_netcdf(self):
        # a valid EDGE file is readable, with and without the
        # equidistance enforcement flag
        edge_path = wrl.util.get_wradlib_data_file('netcdf/edge_netcdf.nc')
        wrl.io.read_edge_netcdf(edge_path)
        wrl.io.read_edge_netcdf(edge_path, enforce_equidist=True)
        # a CfRadial file and a bogus path must both be rejected
        cf_path = wrl.util.get_wradlib_data_file(
            'netcdf/cfrad.20080604_002217_000_SPOL_v36_SUR.nc')
        with self.assertRaises(Exception):
            wrl.io.read_edge_netcdf(cf_path)
        with self.assertRaises(Exception):
            wrl.io.read_edge_netcdf('test')

    def test_read_generic_netcdf(self):
        def fetch(name):
            return wrl.util.get_wradlib_data_file(name)
        # CfRadial file parses fine
        wrl.io.read_generic_netcdf(
            fetch('netcdf/cfrad.20080604_002217_000_SPOL_v36_SUR.nc'))
        # nonexistent path raises
        with self.assertRaises(IOError):
            wrl.io.read_generic_netcdf('test')
        # a Sigmet raw file is not NetCDF and must be rejected
        with self.assertRaises(IOError):
            wrl.io.read_generic_netcdf(
                fetch('sigmet/cor-main131125105503.RAW2049'))
        # HDF5-based and plain CfRadial PPI files parse fine
        wrl.io.read_generic_netcdf(fetch('hdf5/IDR66_20100206_111233.vol.h5'))
        wrl.io.read_generic_netcdf(fetch('netcdf/example_cfradial_ppi.nc'))
class XarrayTests(unittest.TestCase):
    """Tests for the xarray-backed readers (CfRadial/ODIM/GAMIC)."""
    def test_create_xarray_dataarray(self):
        img = np.zeros((360, 10), dtype=np.float32)
        r = np.arange(0, 100000, 10000)
        az = np.arange(0, 360)
        th = np.zeros_like(az)
        proj = epsg_to_osr(4326)
        # coordinates are mandatory -- image alone must be rejected
        with self.assertRaises(TypeError):
            create_xarray_dataarray(img)
        create_xarray_dataarray(img, r, az, th, proj=proj)
    def test_iter(self):
        # iterating a CfRadial object yields one item per stored sweep
        filename = 'netcdf/cfrad.20080604_002217_000_SPOL_v36_SUR.nc'
        ncfile = wrl.util.get_wradlib_data_file(filename)
        cf = CfRadial(ncfile)
        i = 0
        for item in cf:
            i += 1
        self.assertEqual(i, 10)
    def test_del(self):
        # deleting every key empties the underlying source mapping
        filename = 'netcdf/cfrad.20080604_002217_000_SPOL_v36_SUR.nc'
        ncfile = wrl.util.get_wradlib_data_file(filename)
        cf = CfRadial(ncfile)
        for k in list(cf):
            del cf[k]
        self.assertEqual(cf._source, {})
    def test_read_cfradial(self):
        sweep_names = ['sweep_1', 'sweep_2', 'sweep_3', 'sweep_4',
                       'sweep_5', 'sweep_6', 'sweep_7', 'sweep_8',
                       'sweep_9']
        fixed_angles = np.array([0.4999, 1.0986, 1.8018, 2.5983, 3.598,
                                 4.7021, 6.4984, 9.1022, 12.7991])
        filename = 'netcdf/cfrad.20080604_002217_000_SPOL_v36_SUR.nc'
        ncfile = wrl.util.get_wradlib_data_file(filename)
        cf = CfRadial(ncfile)
        np.testing.assert_array_almost_equal(cf.root.sweep_fixed_angle.values,
                                             fixed_angles)
        # sweeps is a sequence of (name, fixed_angle) pairs
        cfnames, cfangles = zip(*cf.sweeps)
        self.assertEqual(sweep_names, list(cfnames))
        np.testing.assert_array_almost_equal(fixed_angles, np.array(cfangles))
        self.assertEqual(cf.sweep, 9)
        # (longitude, latitude, altitude) of the radar site
        self.assertSequenceEqual(cf.location, (120.43350219726562,
                                               22.52669906616211,
                                               45.00000178813934))
        self.assertEqual(cf.version, '1.2')
        self.assertEqual(cf.Conventions, 'CF/Radial instrument_parameters '
                                         'radar_parameters radar_calibration '
                                         'geometry_correction')
        self.assertEqual(repr(cf), repr(cf._source))
    def test_read_odim(self):
        fixed_angles = np.array([0.3, 0.9, 1.8, 3.3, 6.])
        filename = 'hdf5/20130429043000.rad.bewid.pvol.dbzh.scan1.hdf'
        h5file = wrl.util.get_wradlib_data_file(filename)
        cf = OdimH5(h5file)
        np.testing.assert_array_almost_equal(cf.root.sweep_fixed_angle.values,
                                             fixed_angles)
        filename = 'hdf5/knmi_polar_volume.h5'
        h5file = wrl.util.get_wradlib_data_file(filename)
        cf = OdimH5(h5file)
        # an unknown flavour must be rejected
        with self.assertRaises(AttributeError):
            cf = OdimH5(h5file, flavour='None')
    def test_read_gamic(self):
        time_cov = ('2014-08-10T18:23:35Z', '2014-08-10T18:24:05Z')
        filename = 'hdf5/2014-08-10--182000.ppi.mvol'
        h5file = wrl.util.get_wradlib_data_file(filename)
        # GAMIC files are not auto-detected; the flavour must be explicit
        with self.assertRaises(AttributeError):
            OdimH5(h5file)
        cf = OdimH5(h5file, flavour='GAMIC')
        self.assertEqual(str(cf.root.time_coverage_start.values),
                         time_cov[0])
        self.assertEqual(str(cf.root.time_coverage_end.values),
                         time_cov[1])
        # RHI volume, strict and non-strict parsing
        filename = 'hdf5/2014-06-09--185000.rhi.mvol'
        h5file = wrl.util.get_wradlib_data_file(filename)
        cf = OdimH5(h5file, flavour='GAMIC')
        cf = OdimH5(h5file, flavour='GAMIC', strict=False)
    def test_odim_roundtrip(self):
        """Write ODIM, reread, and compare root plus every sweep."""
        filename = 'hdf5/20130429043000.rad.bewid.pvol.dbzh.scan1.hdf'
        odimfile = wrl.util.get_wradlib_data_file(filename)
        cf = OdimH5(odimfile)
        tmp = tempfile.NamedTemporaryFile(mode='w+b').name
        cf.to_odim(tmp)
        cf2 = OdimH5(tmp)
        xr.testing.assert_equal(cf.root, cf2.root)
        for i in range(1, 6):
            key = 'sweep_{}'.format(i)
            xr.testing.assert_equal(cf[key], cf2[key])
        # test write after del, file lockage
        del cf2
        cf.to_odim(tmp)
    def test_odim_roundtrip_nonstrict(self):
        """Same round trip as above with strict=False parsing."""
        filename = 'hdf5/20130429043000.rad.bewid.pvol.dbzh.scan1.hdf'
        odimfile = wrl.util.get_wradlib_data_file(filename)
        cf = OdimH5(odimfile, strict=False)
        tmp = tempfile.NamedTemporaryFile(mode='w+b').name
        cf.to_odim(tmp)
        cf2 = OdimH5(tmp, strict=False)
        xr.testing.assert_equal(cf.root, cf2.root)
        for i in range(1, 6):
            key = 'sweep_{}'.format(i)
            xr.testing.assert_equal(cf[key], cf2[key])
        # test write after del, file lockage
        del cf2
        cf.to_odim(tmp)
    def test_cfradial_roundtrip(self):
        """CfRadial -> CfRadial2 file -> CfRadial object round trip."""
        filename = 'netcdf/cfrad.20080604_002217_000_SPOL_v36_SUR.nc'
        ncfile = wrl.util.get_wradlib_data_file(filename)
        cf = CfRadial(ncfile)
        tmp = tempfile.NamedTemporaryFile(mode='w+b').name
        cf.to_cfradial2(tmp)
        cf2 = CfRadial(tmp)
        xr.testing.assert_equal(cf.root, cf2.root)
        for i in range(1, 10):
            key = 'sweep_{}'.format(i)
            xr.testing.assert_equal(cf[key], cf2[key])
        # test write after del, file lockage
        del cf2
        cf.to_cfradial2(tmp)
    def test_cfradial_odim_roundtrip(self):
        """CfRadial -> ODIM -> CfRadial2 cross-format round trip."""
        filename = 'netcdf/cfrad.20080604_002217_000_SPOL_v36_SUR.nc'
        ncfile = wrl.util.get_wradlib_data_file(filename)
        cf = CfRadial(ncfile)
        tmp = tempfile.NamedTemporaryFile(mode='w+b').name
        cf.to_odim(tmp)
        cf2 = OdimH5(tmp)
        xr.testing.assert_allclose(cf.root.sweep_fixed_angle,
                                   cf2.root.sweep_fixed_angle)
        xr.testing.assert_allclose(cf.root.time_coverage_start,
                                   cf2.root.time_coverage_start)
        # coordinates that legitimately differ between formats are dropped
        drop = ['longitude', 'latitude', 'altitude', 'sweep_mode']
        xr.testing.assert_allclose(cf['sweep_1'].drop(drop).sweep_number,
                                   cf2['sweep_1'].drop(drop).sweep_number)
        tmp1 = tempfile.NamedTemporaryFile(mode='w+b').name
        cf2.to_cfradial2(tmp1)
        cf3 = CfRadial(tmp1)
        xr.testing.assert_allclose(cf.root.time_coverage_start,
                                   cf3.root.time_coverage_start)
class DemTest(unittest.TestCase):
    """Tests for SRTM elevation tile download/merge (``wradlib.dem``).

    NOTE(review): these tests download tiles -- they require network access.
    """
    def test_get_srtm(self):
        # expected 1-degree tiles for a UK extent (Eurasia region)
        targets = ["N51W001", "N51E000", "N51E001",
                   "N52W001", "N52E000", "N52E001"]
        targets = ["%s.hgt.zip" % (f) for f in targets]
        opts = {'region': 'Eurasia'}
        extent = [-0.3, 1.5, 51.4, 52.5]
        datasets = dem.get_srtm(extent, merge=False, download=opts)
        filelist = [os.path.basename(d.GetFileList()[0]) for d in datasets]
        self.assertEqual(targets, filelist)
        # tiles straddling the equator (Africa region)
        targets = ["S02E015", "S02E016", "S01E015",
                   "S01E016", "N00E015", "N00E016"]
        targets = ["%s.hgt.zip" % (f) for f in targets]
        opts = {'region': 'Africa'}
        extent = [15.3, 16.6, -1.4, 0.4]
        datasets = dem.get_srtm(extent, merge=False, download=opts)
        filelist = [os.path.basename(d.GetFileList()[0]) for d in datasets]
        self.assertEqual(targets, filelist)
        # merged mosaic spans 2 tiles in x and 3 in y; the (size-1)*k+1
        # arithmetic presumably accounts for the shared edge row/column
        # between neighbouring tiles -- TODO confirm
        merged = dem.get_srtm(extent)
        xsize = (datasets[0].RasterXSize-1)*2+1
        # NOTE(review): RasterXSize is also used for the y extent; SRTM
        # tiles are square so the value is identical, but RasterYSize would
        # be clearer -- confirm this is not a typo
        ysize = (datasets[0].RasterXSize-1)*3+1
        self.assertEqual(merged.RasterXSize, xsize)
        self.assertEqual(merged.RasterYSize, ysize)
        geo = merged.GetGeoTransform()
        # 3 arc-second resolution; upper-left corner shifted by half a
        # pixel because SRTM pixel centres sit on grid nodes
        resolution = 3/3600
        ulcx = 15 - resolution/2
        ulcy = 1 + resolution/2
        geo_ref = [ulcx, resolution, 0, ulcy, 0, -resolution]
        np.testing.assert_array_almost_equal(geo, geo_ref)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| kmuehlbauer/wradlib | wradlib/tests/test_io.py | Python | mit | 56,474 | [
"NetCDF"
] | 1cae5bec2eb9513ffdd9bb0fa636b312e0a124d5b30b74df31e2833bae0906c8 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import lib
from pyscf.pbc.df import df
import pyscf.pbc.gto as pgto
from pyscf.pbc.lib import kpts_helper
from pyscf import ao2mo
# Minimal periodic two-atom helium cell shared by all tests below.
L = 5.
n = 3
cell = pgto.Cell()
cell.a = numpy.diag([L,L,L])  # cubic lattice, edge length L (Bohr)
cell.mesh = numpy.array([n,n,n])  # coarse FFT mesh -- speed over accuracy
cell.atom = '''He 3. 2. 3.
He 1. 1. 1.'''
cell.basis = 'ccpvdz'
cell.verbose = 0
cell.build(0,0)
nao = cell.nao_nr()  # number of AO basis functions, reused by every test
def finger(a):
    """Return a deterministic scalar fingerprint of array *a*.

    Projects the flattened array onto ``cos(0), cos(1), ...`` so that any
    change in a value or in element ordering changes the result.
    """
    # BUG FIX: the original body referenced the undefined name ``np``;
    # this module imports the package as ``numpy``.
    w = numpy.cos(numpy.arange(a.size))
    return numpy.dot(w, a.ravel())
class KnownValues(unittest.TestCase):
def test_eri1111(self):
kpts = numpy.random.random((4,3)) * .25
kpts[3] = -numpy.einsum('ij->j', kpts[:3])
with_df = df.DF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = kpts
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri = with_df.get_eri(kpts).reshape((nao,)*4)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
def test_eri0110(self):
kpts = numpy.random.random((4,3)) * .25
kpts[3] = kpts[0]
kpts[2] = kpts[1]
with_df = df.DF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = kpts
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri = with_df.get_eri(kpts).reshape((nao,)*4)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
def test_eri0101(self):
kpts = numpy.random.random((4,3)) * .25
kpts[2] = kpts[0]
kpts[3] = kpts[1]
with_df = df.DF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = kpts
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri = with_df.get_eri(kpts).reshape((nao,)*4)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo ).reshape(nao**2,-1)
eri1 = with_df.ao2mo(mo, kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
def test_eri0000(self):
with_df = df.DF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = numpy.zeros((4,3))
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri = ao2mo.restore(1, with_df.get_eri(with_df.kpts), nao)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, with_df.kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
mo = mo.real
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, with_df.kpts, compact=False)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
def test_1d(self):
kpts = numpy.random.random((4,3)) * .25
kpts[3] = -numpy.einsum('ij->j', kpts[:3])
with_df = df.DF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = kpts
with_df.mesh = [11]*3
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
with lib.temporary_env(cell, dimension = 1):
eri = with_df.get_eri(kpts).reshape((nao,)*4)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
with lib.temporary_env(cell, dimension = 1):
eri1 = with_df.ao2mo(mo, kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
def test_2d(self):
kpts = numpy.random.random((4,3)) * .25
kpts[3] = -numpy.einsum('ij->j', kpts[:3])
with_df = df.DF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = kpts
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
with lib.temporary_env(cell, dimension = 2):
eri = with_df.get_eri(kpts).reshape((nao,)*4)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
with lib.temporary_env(cell, dimension = 2):
eri1 = with_df.ao2mo(mo, kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
def test_ao2mo_7d(self):
    # Check that the one-shot 7-index MO transform (ao2mo_7d) agrees with
    # transforming each momentum-conserving (ki, kj, kk, kl) quartet
    # individually through ao2mo.
    L = 3.
    n = 6
    cell = pgto.Cell()
    cell.a = numpy.diag([L,L,L])
    cell.mesh = [n,n,n]
    cell.atom = '''He 2. 2.2 2.
He 1.2 1. 1.'''
    cell.basis = {'He': [[0, (1.2, 1)], [1, (0.6, 1)]]}
    cell.verbose = 0
    cell.build(0,0)
    kpts = cell.make_kpts([1,3,1])
    nkpts = len(kpts)
    nao = cell.nao_nr()
    # fixed seed so the random MO coefficients are reproducible
    numpy.random.seed(1)
    mo =(numpy.random.random((nkpts,nao,nao)) +
         numpy.random.random((nkpts,nao,nao))*1j)
    with_df = df.GDF(cell, kpts)
    out = with_df.ao2mo_7d(mo, kpts)
    ref = numpy.empty_like(out)
    # kl is fixed by momentum conservation for each (ki, kj, kk)
    kconserv = kpts_helper.get_kconserv(cell, kpts)
    for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
        kl = kconserv[ki, kj, kk]
        tmp = with_df.ao2mo((mo[ki], mo[kj], mo[kk], mo[kl]), kpts[[ki,kj,kk,kl]])
        ref[ki,kj,kk] = tmp.reshape([nao]*4)
    self.assertAlmostEqual(abs(out-ref).max(), 0, 12)
if __name__ == '__main__':
    # run the full test suite when executed directly
    print("Full Tests for df ao2mo")
    unittest.main()
| gkc1000/pyscf | pyscf/pbc/df/test/test_df_ao2mo.py | Python | apache-2.0 | 7,511 | [
"PySCF"
] | d404519637dfabd178fe50a4e549f59d24e8b5ce8379b199f119c491ff28c6ce |
# Packaging script for the IGCexpansion package.
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# consider migrating to setuptools. dependency_links is also ignored by
# modern pip -- verify how jsonctmctree is expected to be installed.
from distutils.core import setup
setup(name = 'IGCexpansion',
      version = '0.3-dev',
      author = 'Xiang Ji',
      url = 'https://github.com/xji3/IGCexpansion',
      #download_url = 'https://github.com/xji3/Genconv/tree/master/IGCexpansion/',
      packages = ['IGCexpansion',],
      install_requires=[
          'Biopython', 'networkx', 'numpy', 'scipy', #'jsonctmctree'
      ],
      # jsonctmctree is fetched from GitHub rather than PyPI
      dependency_links=[
          'https://github.com/xji3/jsonctmctree/tarball/master#egg=package-0.2.0',
          'git+https://github.com/xji3/jsonctmctree.git@master#egg=jsonctmctree-0.2.0'
      ]
      #long_description = open('README.md').read()
      )
| xji3/IGCexpansion | setup.py | Python | gpl-3.0 | 645 | [
"Biopython"
] | 5caebfa87d8e2975c2b10292c3cd8379fe4cf585986e33cdbea541386cd8f639 |
"""
Module of kernels that are able to handle continuous as well as categorical
variables (both ordered and unordered).
This is a slight deviation from the current approach in
statsmodels.nonparametric.kernels where each kernel is a class object.
Having kernel functions rather than classes makes extension to a multivariate
kernel density estimation much easier.
NOTE: As it is, this module does not interact with the existing API
"""
import numpy as np
from scipy.special import erf
#TODO:
# - make sure we only receive int input for wang-ryzin and aitchison-aitken
# - Check for the scalar Xi case everywhere
def aitchison_aitken(h, Xi, x, num_levels=None):
    r"""
    The Aitchison-Aitken kernel, used for unordered discrete random variables.

    Parameters
    ----------
    h : 1-D ndarray, shape (K,)
        The bandwidths used to estimate the value of the kernel function.
    Xi : 2-D ndarray of ints, shape (nobs, K)
        The value of the training set.
    x : 1-D ndarray, shape (K,)
        The value at which the kernel density is being estimated.
    num_levels : bool, optional
        Number of levels of the random variable. If None, it is inferred
        from the distinct values observed in `Xi`.

    Returns
    -------
    kernel_value : ndarray, shape (nobs, K)
        The value of the kernel function at each training point for each var.

    Notes
    -----
    See p.18 of [2]_ for details. The kernel equals :math:`1-\lambda` when
    :math:`X_{i}=x` and :math:`\frac{\lambda}{c-1}` otherwise, where
    :math:`c` is the number of levels plus one of the RV.

    References
    ----------
    .. [1] J. Aitchison and C.G.G. Aitken, "Multivariate binary discrimination
           by the kernel method", Biometrika, vol. 63, pp. 413-420, 1976.
    .. [2] Racine, Jeff. "Nonparametric Econometrics: A Primer," Foundation
           and Trends in Econometrics: Vol 3: No 1, pp1-88., 2008.
    """
    Xi = Xi.reshape(Xi.size)  # flatten; also handles scalar Xi
    if num_levels is None:
        num_levels = np.asarray(np.unique(Xi).size)
    # weight for the non-matching levels, spread over the c-1 other levels
    off_level = np.ones(Xi.size) * h / (num_levels - 1)
    return np.where(Xi == x, 1 - h, off_level)
def wang_ryzin(h, Xi, x):
    r"""
    The Wang-Ryzin kernel, used for ordered discrete random variables.

    Parameters
    ----------
    h : scalar or 1-D ndarray, shape (K,)
        The bandwidths used to estimate the value of the kernel function.
    Xi : ndarray of ints, shape (nobs, K)
        The value of the training set.
    x : scalar or 1-D ndarray of shape (K,)
        The value at which the kernel density is being estimated.

    Returns
    -------
    kernel_value : ndarray, shape (nobs, K)
        The value of the kernel function at each training point for each var.

    Notes
    -----
    See p. 19 in [1]_ for details. The kernel equals :math:`1-\lambda` when
    :math:`X_{i}=x` and :math:`\frac{1-\lambda}{2}\lambda^{|X_{i}-x|}`
    otherwise, where :math:`\lambda` is the bandwidth.

    References
    ----------
    .. [1] Racine, Jeff. "Nonparametric Econometrics: A Primer," Foundation
           and Trends in Econometrics: Vol 3: No 1, pp1-88., 2008.
           http://dx.doi.org/10.1561/0800000009
    .. [2] M.-C. Wang and J. van Ryzin, "A class of smooth estimators for
           discrete distributions", Biometrika, vol. 68, pp. 301-309, 1981.
    """
    Xi = Xi.reshape(Xi.size)  # flatten; also handles scalar Xi
    # geometric decay with the ordered distance |Xi - x|
    decayed = 0.5 * (1 - h) * (h ** abs(Xi - x))
    return np.where(Xi == x, 1 - h, decayed)
def gaussian(h, Xi, x):
    """
    Gaussian Kernel for continuous variables.

    Parameters
    ----------
    h : 1-D ndarray, shape (K,)
        The bandwidths used to estimate the value of the kernel function.
    Xi : 1-D ndarray, shape (K,)
        The value of the training set.
    x : 1-D ndarray, shape (K,)
        The value at which the kernel density is being estimated.

    Returns
    -------
    kernel_value : ndarray, shape (nobs, K)
        The value of the kernel function at each training point for each var.
    """
    z = (Xi - x) / h
    return np.exp(-0.5 * z * z) / np.sqrt(2 * np.pi)
def gaussian_convolution(h, Xi, x):
    """Calculates the Gaussian Convolution Kernel (variance doubled)."""
    z = (Xi - x) / h
    return np.exp(-0.25 * z * z) / np.sqrt(4 * np.pi)
def wang_ryzin_convolution(h, Xi, Xj):
    # Discrete analogue of the Gaussian convolution kernel: accumulate the
    # product of the two kernels over the observed support of Xi.
    # (Not an exact convolution -- a better name is wanted.)
    total = np.zeros(Xi.size)
    for level in np.unique(Xi):
        total += wang_ryzin(h, Xi, level) * wang_ryzin(h, Xj, level)
    return total
def aitchison_aitken_convolution(h, Xi, Xj):
    # Convolution-style kernel for unordered variables: accumulate the
    # product of the two kernels over the observed support of Xi.
    support = np.unique(Xi)
    levels = support.size
    total = np.zeros(Xi.size)
    for level in support:
        total += (aitchison_aitken(h, Xi, level, num_levels=levels) *
                  aitchison_aitken(h, Xj, level, num_levels=levels))
    return total
def gaussian_cdf(h, Xi, x):
    # CDF of the Gaussian kernel, scaled by the bandwidth h
    z = (x - Xi) / (h * np.sqrt(2))
    return h * 0.5 * (1 + erf(z))
def aitchison_aitken_cdf(h, Xi, x_u):
    # Cumulative kernel: sum the Aitchison-Aitken weights over all
    # observed levels up to x_u.
    x_u = int(x_u)
    support = np.unique(Xi)
    levels = support.size
    total = np.zeros(Xi.size)
    for level in support:
        #FIXME: why a comparison for unordered variables?
        if level <= x_u:
            total += aitchison_aitken(h, Xi, level, num_levels=levels)
    return total
def wang_ryzin_cdf(h, Xi, x_u):
    # Cumulative kernel: sum the Wang-Ryzin weights over all observed
    # levels up to x_u.
    total = np.zeros(Xi.size)
    for level in np.unique(Xi):
        if level <= x_u:
            total += wang_ryzin(h, Xi, level)
    return total
def d_gaussian(h, Xi, x):
    # Derivative of the Gaussian kernel.
    # NOTE(review): this carries a factor of 2 relative to the analytic
    # derivative (Xi - x) / h**2 * gaussian -- verify against callers.
    return gaussian(h, Xi, x) * (Xi - x) * 2 / h**2
def aitchison_aitken_reg(h, Xi, x):
    """
    A version of the Aitchison-Aitken kernel for nonparametric regression.

    Suggested by Li and Racine.
    """
    # weight 1 on matching levels, bandwidth h elsewhere
    return np.where(Xi == x, 1.0, h * np.ones(Xi.size))
def wang_ryzin_reg(h, Xi, x):
"""
A version for the Wang-Ryzin kernel for nonparametric regression.
Suggested by Li and Racine in [1] ch.4
"""
return h ** abs(Xi - x)
| detrout/debian-statsmodels | statsmodels/nonparametric/kernels.py | Python | bsd-3-clause | 6,365 | [
"Gaussian"
] | 8309b11f7b76720660e01d0358751828b36953ef9613b5b371543d990e44ca8b |
#!/usr/bin/env python
'''buildScriptsDocs
Build scripts documentation from the scripts docstrings. The scripts are not
very uniform
'''
import glob
import os
import sys
import subprocess
from DIRAC import rootPath
# Scripts that either do not have -h, are obsolete or cause havoc when called
# Scripts that either do not have -h, are obsolete or cause havoc when called
BAD_SCRIPTS = ['dirac-deploy-scripts', 'dirac-install', 'dirac-compile-externals',
               'dirac-framework-self-ping', 'dirac-dms-add-files',
               ]
# Each entry: (path patterns to match, section title, matched script paths,
# scripts with pre-existing rst files that cannot be regenerated).
# The third element is filled in-place by getScripts().
MARKERS_SECTIONS_SCRIPTS = [(['dms'], 'Data Management', [], []),
                            (['wms'], 'Workload Management', [], []),
                            (['dirac-proxy', 'dirac-info', 'dirac-version', 'myproxy'],
                             'Others', [], ['dirac-cert-convert.sh']),
                            # (['rss'],'Resource Status Management', [], []),
                            # (['rms'],'Request Management', [], []),
                            # (['stager'],'Storage Management', [], []),
                            # (['transformation'], 'Transformation Management', [], []),
                            # (['admin', 'accounting', 'FrameworkSystem',
                            #  'ConfigurationSystem', 'Core',], 'Admin', [], []),
                            # ([''], 'CatchAll', [], []),
                            ]
def mkdir(path):
    """ Safe mkdir: create `path` (and parents), ignoring OSError.

    os.makedirs raises OSError both when the directory already exists and
    when it cannot be created; both cases are deliberately ignored here.
    """
    try:
        os.makedirs(path)
    except OSError:
        pass
def runCommand(command):
    """ Execute a shell command and return its combined stdout+stderr.

    Returns an empty string when the command cannot be started or exits
    non-zero. Note the command string is split on single spaces only, so
    arguments containing spaces are not supported.
    """
    command = command.strip().split(" ")
    try:
        return subprocess.check_output(command, stderr=subprocess.STDOUT)
    except (OSError, subprocess.CalledProcessError) as e:
        print "Error when runnning", command, "\n", repr(e)
        return ''
def getScripts():
    """ Collect all scripts in the DIRAC systems and sort them into the
    sections of MARKERS_SECTIONS_SCRIPTS (mutated in place: each matching
    script path is appended to the section's third tuple element).
    """
    diracPath = os.path.join(rootPath, 'DIRAC')
    if not os.path.exists(diracPath):
        sys.exit('%s does not exist' % diracPath)
    # Get all scripts
    scriptsPath = os.path.join(diracPath, '*', 'scripts', '*.py')
    # Get all scripts on scriptsPath and sorts them, this will make our life
    # easier afterwards
    scripts = glob.glob(scriptsPath)
    scripts.sort()
    for scriptPath in scripts:
        # Few modules still have __init__.py on the scripts directory
        if '__init__' in scriptPath or 'build' in scriptPath:
            print "ignoring", scriptPath
            continue
        # if os.path.basename(scriptPath) in BAD_SCRIPTS:
        # print "ignoring", scriptPath
        # continue
        # first marker list that matches the path claims the script
        for mT in MARKERS_SECTIONS_SCRIPTS:
            if any(pattern in scriptPath for pattern in mT[0]):
                mT[2].append(scriptPath)
                break
    return
def createFoldersAndIndices():
    """ Create the index files and folders where the RST files will go,
    e.g. source/UserGuide/CommandReference, one subfolder per section.
    """
    # create the main UserGuide Index file
    userIndexRST = """
========================================
Commands Reference (|release|)
========================================
This page is the work in progress. See more material here soon !
.. toctree::
:maxdepth: 1
"""
    for mT in MARKERS_SECTIONS_SCRIPTS:
        system = mT[1]
        # section folder name is the title without spaces
        systemString = system.replace(" ", "")
        userIndexRST += " %s/index\n" % systemString
        print userIndexRST
        sectionPath = os.path.join(rootPath, 'DIRAC/docs/source/UserGuide/CommandReference/', systemString)
        mkdir(sectionPath)
        createSectionIndex(mT, sectionPath)
    userIndexPath = os.path.join(rootPath, 'DIRAC/docs/source/UserGuide/CommandReference/index.rst')
    with open(userIndexPath, 'w') as userIndexFile:
        userIndexFile.write(userIndexRST)
def createSectionIndex(mT, sectionPath):
    """ Create the index.rst for one section and trigger the per-script
    RST generation for every script listed in the section tuple.
    """
    systemName = mT[1]
    systemHeader = systemName + " Command Reference"
    # RST over/underlined section title
    systemHeader = "%s\n%s\n%s\n" % ("=" * len(systemHeader), systemHeader, "=" * len(systemHeader))
    sectionIndexRST = systemHeader + """
In this subsection the %s commands are collected
.. toctree::
:maxdepth: 2
""" % systemName
    # these scripts use pre-existing rst files, cannot re-create them
    # automatically
    for script in mT[3]:
        scriptName = os.path.basename(script)
        sectionIndexRST += " %s\n" % scriptName
    for script in mT[2]:
        scriptName = os.path.basename(script)
        if scriptName.endswith('.py'):
            scriptName = scriptName[:-3]
        # only list the script if its doc file was actually produced
        if createScriptDocFiles(script, sectionPath, scriptName):
            sectionIndexRST += " %s\n" % scriptName
    sectionIndexPath = os.path.join(sectionPath, 'index.rst')
    with open(sectionIndexPath, 'w') as sectionIndexFile:
        sectionIndexFile.write(sectionIndexRST)
def createScriptDocFiles(script, sectionPath, scriptName):
    """ Create the RST file for one script by capturing its -h output.

    Folders and indices already exist; returns True when a doc file was
    written, False when the script is blacklisted or produced no help text.
    """
    if scriptName in BAD_SCRIPTS:
        return False
    print "Creating Doc for", scriptName
    helpMessage = runCommand("%s -h" % script)
    if not helpMessage:
        print "NO DOC For", scriptName
        return False
    scriptRSTPath = os.path.join(sectionPath, scriptName + '.rst')
    with open(scriptRSTPath, 'w') as rstFile:
        # RST over/underlined title made from the script name
        rstFile.write('=' * len(scriptName))
        rstFile.write('\n%s\n' % scriptName)
        rstFile.write('=' * len(scriptName))
        rstFile.write('\n')
        for line in helpMessage.splitlines():
            line = line.rstrip()
            # lines ending with ':' start an RST block, needs a blank line
            newLine = line + ":\n\n" if line.endswith(":") else line + "\n"
            rstFile.write(newLine)
    return True
def run():
    ''' Create the rst files right in the source tree of the docs:
    collect the scripts, then build all folders and index files.
    '''
    getScripts()
    createFoldersAndIndices()
    print 'Done'
if __name__ == "__main__":
    # build the command-reference docs when executed directly
    run()
| andresailer/DIRAC | docs/Tools/buildScriptsDocs.py | Python | gpl-3.0 | 5,675 | [
"DIRAC"
] | 0b9f5b5c12e92c5c8d720a2dd77c1e150eb7560345de1536bd21d59614219ddb |
from __future__ import division, print_function
import numpy as np
from bct.utils import cuberoot, binarize, invert
def breadthdist(CIJ):
    '''
    Compute reachability and distance matrices via repeated breadth-first
    search.

    The binary reachability matrix has (u,v)=1 when a path from node u to
    node v exists, else 0. The distance matrix holds shortest path lengths
    between all pairs of nodes; its global mean is the characteristic path
    length of the network.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed/undirected connection matrix

    Returns
    -------
    R : NxN np.ndarray
        binary reachability matrix
    D : NxN np.ndarray
        distance matrix

    Notes
    -----
    slower but less memory intensive than "reachdist.m".
    '''
    n = len(CIJ)
    # one BFS per node; row i holds the distances from node i
    D = np.zeros((n, n))
    for source in range(n):
        D[source, :] = breadth(CIJ, source)[0]
    D[D == 0] = np.inf       # unreached pairs (and self-distances) -> inf
    R = (D != np.inf)        # reachable iff the distance is finite
    return R, D
def breadth(CIJ, source):
    '''
    Implementation of breadth-first search.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed/undirected connection matrix
    source : int
        source vertex

    Returns
    -------
    distance : Nx1 np.ndarray
        vector of distances between source and ith vertex (0 for source)
    branch : Nx1 np.ndarray
        vertex that precedes i in the breadth-first search (-1 for source)

    Notes
    -----
    The BFS tree does not contain all (shortest) paths, but yields at least
    one path of minimum length from the source to every reachable vertex.
    The entire graph is explored starting from `source`.
    '''
    n = len(CIJ)
    # node states: 0 = unvisited, 1 = queued, 2 = fully explored
    UNVISITED, QUEUED, DONE = 0, 1, 2
    state = np.zeros((n,))
    distance = np.inf * np.ones((n,))
    branch = np.zeros((n,))

    state[source] = QUEUED
    distance[source] = 0
    branch[source] = -1
    queue = [source]

    # expand the queue until the whole graph has been explored
    while queue:
        u = queue[0]
        neighbors, = np.where(CIJ[u, :])
        for v in neighbors:
            # allows the source distance itself to be recorded when a
            # cycle leads back to a node at distance 0
            if distance[v] == 0:
                distance[v] = distance[u] + 1
            if state[v] == UNVISITED:
                state[v] = QUEUED
                distance[v] = distance[u] + 1
                branch[v] = u
                queue.append(v)
        queue = queue[1:]
        state[u] = DONE

    return distance, branch
def charpath(D, include_diagonal=False, include_infinite=True):
    '''
    Characteristic path length, global efficiency and eccentricity measures
    derived from a distance matrix.

    Parameters
    ----------
    D : NxN np.ndarray
        distance matrix
    include_diagonal : bool
        If True, the diagonal entries of D enter the averages.
        Default value is False.
    include_infinite : bool
        If True, infinite distances enter the averages.

    Returns
    -------
    lambda : float
        characteristic path length (mean of the kept entries of D)
    efficiency : float
        global efficiency (mean of the inverse kept entries of D)
    ecc : Nx1 np.ndarray
        eccentricity at each vertex
    radius : float
        radius of graph (minimum eccentricity)
    diameter : float
        diameter of graph (maximum eccentricity)

    Notes
    -----
    The input distance matrix may be obtained with any of the distance
    functions, e.g. distance_bin, distance_wei.
    '''
    D = D.copy()

    # entries excluded from the averages are marked NaN
    if not include_diagonal:
        np.fill_diagonal(D, np.nan)
    if not include_infinite:
        D[np.isinf(D)] = np.nan

    kept = D[~np.isnan(D)].ravel()

    lambda_ = np.mean(kept)
    efficiency = np.mean(1 / kept)

    # per-vertex eccentricity, ignoring the NaN-masked entries
    masked = np.ma.masked_where(np.isnan(D), D)
    ecc = np.array(masked.max(axis=1))

    radius = ecc.min()
    diameter = ecc.max()

    return lambda_, efficiency, ecc, radius, diameter
def cycprob(Pq):
    '''
    Cycles are paths which begin and end at the same node. Cycle
    probability for path length d, is the fraction of all paths of length
    d-1 that may be extended to form cycles of length d.

    Parameters
    ----------
    Pq : NxNxQ np.ndarray
        Path matrix with Pq[i,j,q] = number of paths from i to j of length q.
        Produced by findpaths()

    Returns
    -------
    fcyc : Qx1 np.ndarray
        fraction of all paths that are cycles for each path length q
    pcyc : Qx1 np.ndarray
        probability that a non-cyclic path of length q-1 can be extended to
        form a cycle of length q for each path length q
    '''
    nq = np.size(Pq, axis=2)

    # fcyc: cycles (diagonal entries) as a fraction of all paths of length q
    # note: fcyc[1] must be zero, as there cannot be cycles of length 1
    fcyc = np.zeros(nq)
    for q in range(nq):
        total = np.sum(Pq[:, :, q])
        if total > 0:
            fcyc[q] = np.sum(np.diag(Pq[:, :, q])) / total

    # note: pcyc[1] is not defined (set to zero)
    # note: pcyc[2] is equal to the fraction of reciprocal connections
    # note: there are no non-cyclic paths of length N and no cycles of len N+1
    pcyc = np.zeros(nq)
    for q in range(nq):
        # number of non-cyclic paths of the previous length
        acyclic = np.sum(Pq[:, :, q - 1]) - np.sum(np.diag(Pq[:, :, q - 1]))
        if acyclic > 0:
            # bugfix: the original computed (diag/total) - diag instead of
            # cycles-of-length-q divided by non-cyclic paths of length q-1,
            # per the MATLAB BCT reference implementation
            pcyc[q] = np.sum(np.diag(Pq[:, :, q])) / acyclic

    return fcyc, pcyc
def distance_bin(G):
    '''
    Binary shortest-path-length matrix via algebraic shortest paths.

    The distance matrix contains lengths of shortest paths between all
    pairs of nodes; its global mean is the characteristic path length of
    the network.

    Parameters
    ----------
    A : NxN np.ndarray
        binary directed/undirected connection matrix

    Returns
    -------
    D : NxN
        distance matrix

    Notes
    -----
    Lengths between disconnected nodes are set to Inf.
    Lengths on the main diagonal are set to 0.
    Algorithm: Algebraic shortest paths.
    '''
    G = binarize(G, copy=True)
    D = np.eye(len(G))          # accumulates shortest path lengths
    step = 1
    power = G.copy()            # entries of G^n count paths of length n
    new_shortest = (power != 0)
    while np.any(new_shortest):
        D += step * new_shortest
        step += 1
        power = np.dot(power, G)
        # a shortest path of the new length exists only where no shorter
        # path has been recorded yet
        new_shortest = (power != 0) * (D == 0)

    D[D == 0] = np.inf          # disconnected nodes are assigned d=inf
    np.fill_diagonal(D, 0)      # self-distances are zero
    return D
def distance_wei(G):
    '''
    Weighted shortest-path-length matrix via Dijkstra's algorithm.

    Parameters
    ----------
    L : NxN np.ndarray
        Directed/undirected connection-length matrix.
        NB L is not the adjacency matrix. See below.

    Returns
    -------
    D : NxN np.ndarray
        distance (shortest weighted path) matrix
    B : NxN np.ndarray
        matrix of number of edges in shortest weighted path

    Notes
    -----
    The input matrix must be a connection-length matrix, typically obtained
    via a mapping from weight to length (e.g. an inverse of the
    connectivity matrix, so stronger correlations mean shorter distances).

    The number of edges in shortest weighted paths may exceed the number of
    edges in shortest binary paths, because shortest weighted paths have
    the minimal weighted distance but not necessarily the minimal number of
    edges.

    Lengths between disconnected nodes are set to Inf.
    Lengths on the main diagonal are set to 0.

    Algorithm: Dijkstra's algorithm.
    '''
    n = len(G)
    D = np.full((n, n), np.inf)     # distance matrix
    np.fill_diagonal(D, 0)
    B = np.zeros((n, n))            # number of edges matrix

    for source in range(n):
        # True while a node's distance is still temporary
        pending = np.ones((n,), dtype=bool)
        residual = G.copy()
        frontier = [source]
        while True:
            pending[frontier] = 0           # frontier distances now final
            residual[:, frontier] = 0       # drop in-edges of settled nodes
            for v in frontier:
                neighbors, = np.where(residual[v, :])
                candidates = np.array(
                    [D[source, neighbors].flatten(),
                     (D[source, v] + residual[v, neighbors]).flatten()])
                D[source, neighbors] = np.min(candidates, axis=0)
                which = np.argmin(candidates, axis=0)
                # paths that were just lengthened gain one edge
                improved = neighbors[np.where(which == 1)]
                B[source, improved] = B[source, v] + 1

            if D[source, pending].size == 0:    # all nodes reached
                break
            nearest = np.min(D[source, pending])
            if np.isinf(nearest):               # some nodes unreachable
                break
            frontier, = np.where(D[source, :] == nearest)

    return D, B
def distance_wei_floyd(adjacency, transform=None):
    """
    Computes the topological length of the shortest possible path connecting
    every pair of nodes in the network.

    Parameters
    ----------
    D : (N x N) array_like
        Weighted/unweighted, direct/undirected connection weight/length array
    transform : str, optional
        If `adjacency` is a connection weight array, specify a transform to map
        input connection weights to connection lengths. Options include ['log',
        'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
        Default: None

    Returns
    -------
    SPL : (N x N) ndarray
        Weighted/unweighted shortest path-length array. If `D` is a directed
        graph, then `SPL` is not symmetric
    hops : (N x N) ndarray
        Number of edges in the shortest path array. If `D` is unweighted, `SPL`
        and `hops` are identical.
    Pmat : (N x N) ndarray
        Element `[i,j]` of this array indicates the next node in the shortest
        path between `i` and `j`. This array is used as an input argument for
        function `retrieve_shortest_path()`, which returns as output the
        sequence of nodes comprising the shortest path between a given pair of
        nodes.

    Notes
    -----
    There may be more than one shortest path between any pair of nodes in the
    network. Non-unique shortest paths are termed shortest path degeneracies
    and are most likely to occur in unweighted networks. When the shortest-path
    is degenerate, the elements of `Pmat` correspond to the first shortest path
    discovered by the algorithm.

    The input array may be either a connection weight or length array. The
    connection length array is typically obtained with a mapping from weight to
    length, such that higher weights are mapped to shorter lengths (see
    argument `transform`, above).

    Originally written in Matlab by Andrea Avena-Koenigsberger (IU, 2012)

    References
    ----------
    .. [1] Floyd, R. W. (1962). Algorithm 97: shortest path. Communications of
       the ACM, 5(6), 345.
    .. [2] Roy, B. (1959). Transitivite et connexite. Comptes Rendus
       Hebdomadaires Des Seances De L Academie Des Sciences, 249(2), 216-218.
    .. [3] Warshall, S. (1962). A theorem on boolean matrices. Journal of the
       ACM (JACM), 9(1), 11-12.
    .. [4] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm
    """
    # map connection weights to connection lengths, if requested
    if transform is not None:
        if transform == 'log':
            if np.logical_or(adjacency > 1, adjacency < 0).any():
                raise ValueError("Connection strengths must be in the " +
                                 "interval [0,1) to use the transform " +
                                 "-log(w_ij).")
            SPL = -np.log(adjacency)
        elif transform == 'inv':
            SPL = 1. / adjacency
        else:
            raise ValueError("Unexpected transform type. Only 'log' and " +
                             "'inv' are accepted")
    else:
        SPL = adjacency.copy().astype('float')
        SPL[SPL == 0] = np.inf  # absent edges have infinite length
    n = adjacency.shape[1]
    flag_find_paths = True  # hop counts and predecessors are always tracked
    hops = np.array(adjacency != 0).astype('float')
    Pmat = np.repeat(np.atleast_2d(np.arange(0, n)), n, 0)
    # Floyd-Warshall: successively allow node k as an intermediate hop
    for k in range(n):
        # candidate path lengths routed through k: SPL[i,k] + SPL[k,j]
        i2k_k2j = np.repeat(SPL[:, [k]], n, 1) + np.repeat(SPL[[k], :], n, 0)
        if flag_find_paths:
            path = SPL > i2k_k2j  # pairs improved by routing through k
            i, j = np.where(path)
            hops[path] = hops[i, k] + hops[k, j]
            Pmat[path] = Pmat[i, k]
        SPL = np.min(np.stack([SPL, i2k_k2j], 2), 2)
    I = np.eye(n) > 0
    SPL[I] = 0  # self-distances are zero
    if flag_find_paths:
        hops[I], Pmat[I] = 0, 0
    return SPL, hops, Pmat
def retrieve_shortest_path(s, t, hops, Pmat):
    """
    Returns nodes comprising the shortest path between `s` and `t`.

    Parameters
    ----------
    s : int
        Source node, i.e. node where the shortest path begins
    t : int
        Target node, i.e. node where the shortest path ends
    hops : (N x N) array_like
        Number of edges in the path; second output of `distance_wei_floyd`.
    Pmat : (N x N) array_like
        `Pmat[k,t]` is the next node on the shortest path from `k` to `t`;
        third output of `distance_wei_floyd`.

    Returns
    -------
    path : ndarray
        Nodes (indices) comprising the shortest path between `s` and `t`
        as a column vector, or an empty list when no path exists.

    Notes
    -----
    Originally written in Matlab by Andrea Avena-Koenigsberger and Joaquin Goni
    (IU, 2012)
    """
    n_edges = hops[s, t]
    if n_edges == 0:
        # s and t are disconnected (or identical): nothing to report
        return []

    path = np.zeros((int(n_edges + 1), 1), dtype='int')
    path[0] = s
    # follow the successor pointers from s toward t
    for step in range(1, len(path)):
        s = Pmat[s, t]
        path[step] = s
    return path
def efficiency_bin(G, local=False):
    '''
    The global efficiency is the average of inverse shortest path length,
    and is inversely related to the characteristic path length.

    The local efficiency is the global efficiency computed on the
    neighborhood of the node, and is related to the clustering coefficient.

    Parameters
    ----------
    A : NxN np.ndarray
        binary undirected connection matrix
    local : bool
        If True, computes local efficiency instead of global efficiency.
        Default value = False.

    Returns
    -------
    Eglob : float
        global efficiency, only if local=False
    Eloc : Nx1 np.ndarray
        local efficiency, only if local=True
    '''
    def distance_inv(g):
        # inverse shortest-path-length matrix via algebraic shortest paths
        # (powers of the adjacency matrix reveal shortest n-paths)
        D = np.eye(len(g))
        n = 1
        nPATH = g.copy()
        L = (nPATH != 0)
        while np.any(L):
            D += n * L
            n += 1
            nPATH = np.dot(nPATH, g)
            L = (nPATH != 0) * (D == 0)
        # unreachable pairs get distance inf, hence inverse 0 below
        D[np.logical_not(D)] = np.inf
        D = 1 / D
        np.fill_diagonal(D, 0)
        return D

    G = binarize(G)
    n = len(G)  # number of nodes
    if local:
        E = np.zeros((n,))  # local efficiency
        for u in range(n):
            # V,=np.where(G[u,:]) #neighbors
            # k=len(V) #degree
            # if k>=2: #degree must be at least 2
            # e=distance_inv(G[V].T[V])
            # E[u]=np.sum(e)/(k*k-k) #local efficiency computation

            # find pairs of neighbors
            V, = np.where(np.logical_or(G[u, :], G[u, :].T))
            # inverse distance matrix of the neighborhood subgraph
            e = distance_inv(G[np.ix_(V, V)])
            # symmetrized inverse distance matrix
            se = e + e.T
            # symmetrized adjacency vector
            sa = G[u, V] + G[V, u].T
            numer = np.sum(np.outer(sa.T, sa) * se) / 2
            if numer != 0:
                denom = np.sum(sa)**2 - np.sum(sa * sa)
                E[u] = numer / denom  # local efficiency
    else:
        e = distance_inv(G)
        E = np.sum(e) / (n * n - n)  # global efficiency
    return E
def efficiency_wei(Gw, local=False):
    '''
    The global efficiency is the average of inverse shortest path length,
    and is inversely related to the characteristic path length.

    The local efficiency is the global efficiency computed on the
    neighborhood of the node, and is related to the clustering coefficient.

    Parameters
    ----------
    W : NxN np.ndarray
        undirected weighted connection matrix
        (all weights in W must be between 0 and 1)
    local : bool
        If True, computes local efficiency instead of global efficiency.
        Default value = False.

    Returns
    -------
    Eglob : float
        global efficiency, only if local=False
    Eloc : Nx1 np.ndarray
        local efficiency, only if local=True

    Notes
    -----
    The efficiency is computed using an auxiliary connection-length
    matrix L, defined as L_ij = 1/W_ij for all nonzero L_ij; This has an
    intuitive interpretation, as higher connection weights intuitively
    correspond to shorter lengths.

    The weighted local efficiency broadly parallels the weighted
    clustering coefficient of Onnela et al. (2005) and distinguishes the
    influence of different paths based on connection weights of the
    corresponding neighbors to the node in question. In other words, a path
    between two neighbors with strong connections to the node in question
    contributes more to the local efficiency than a path between two weakly
    connected neighbors. Note that this weighted variant of the local
    efficiency is hence not a strict generalization of the binary variant.

    Algorithm: Dijkstra's algorithm
    '''
    def distance_inv_wei(G):
        # inverse shortest weighted path length matrix, computed with
        # Dijkstra's algorithm from every node
        n = len(G)
        D = np.zeros((n, n))  # distance matrix
        D[np.logical_not(np.eye(n))] = np.inf

        for u in range(n):
            # distance permanence (true is temporary)
            S = np.ones((n,), dtype=bool)
            G1 = G.copy()
            V = [u]
            while True:
                S[V] = 0  # distance u->V is now permanent
                G1[:, V] = 0  # no in-edges as already shortest
                for v in V:
                    W, = np.where(G1[v, :])  # neighbors of smallest nodes
                    td = np.array(
                        [D[u, W].flatten(), (D[u, v] + G1[v, W]).flatten()])
                    D[u, W] = np.min(td, axis=0)

                if D[u, S].size == 0:  # all nodes reached
                    break
                minD = np.min(D[u, S])
                if np.isinf(minD):  # some nodes cannot be reached
                    break
                V, = np.where(D[u, :] == minD)

        np.fill_diagonal(D, 1)
        D = 1 / D  # unreachable pairs (inf) invert to 0
        np.fill_diagonal(D, 0)
        return D

    n = len(Gw)
    Gl = invert(Gw, copy=True)  # connection length matrix
    A = np.array((Gw != 0), dtype=int)  # binary adjacency
    if local:
        E = np.zeros((n,))  # local efficiency
        for u in range(n):
            # V,=np.where(Gw[u,:]) #neighbors
            # k=len(V) #degree
            # if k>=2: #degree must be at least 2
            # e=(distance_inv_wei(Gl[V].T[V])*np.outer(Gw[V,u],Gw[u,V]))**1/3
            # E[u]=np.sum(e)/(k*k-k)

            # find pairs of neighbors
            V, = np.where(np.logical_or(Gw[u, :], Gw[:, u].T))
            # symmetrized vector of weights (cube roots, Onnela-style)
            sw = cuberoot(Gw[u, V]) + cuberoot(Gw[V, u].T)
            # inverse distance matrix of the neighborhood subgraph
            e = distance_inv_wei(Gl[np.ix_(V, V)])
            # symmetrized inverse distance matrix
            se = cuberoot(e) + cuberoot(e.T)

            numer = np.sum(np.outer(sw.T, sw) * se) / 2
            if numer != 0:
                # symmetrized adjacency vector
                sa = A[u, V] + A[V, u].T
                denom = np.sum(sa)**2 - np.sum(sa * sa)
                # print numer,denom
                E[u] = numer / denom  # local efficiency
    else:
        e = distance_inv_wei(Gl)
        E = np.sum(e) / (n * n - n)
    return E
def findpaths(CIJ, qmax, sources, savepths=False):
    '''
    Paths are sequences of linked nodes, that never visit a single node
    more than once. This function finds all paths that start at a set of
    source nodes, up to a specified length. Warning: very memory-intensive.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed/undirected connection matrix
    qmax : int
        maximal path length
    sources : Nx1 np.ndarray
        source units from which paths are grown
    savepths : bool
        True if all paths are to be collected and returned. This
        functionality is currently not enabled.

    Returns
    -------
    Pq : NxNxQ np.ndarray
        Path matrix with P[i,j,jq] = number of paths from i to j with length q
    tpath : int
        total number of paths found
    plq : Qx1 np.ndarray
        path length distribution as a function of q
    qstop : int
        path length at which findpaths is stopped
    allpths : None
        a matrix containing all paths up to qmax. This function is extremely
        complicated and reimplementing it in bctpy is not straightforward.
    util : NxQ np.ndarray
        node use index

    Notes
    -----
    Note that Pq(:,:,N) can only carry entries on the diagonal, as all
    "legal" paths of length N-1 must terminate. Cycles of length N are
    possible, with all vertices visited exactly once (except for source and
    target). 'qmax = N' can wreak havoc (due to memory problems).

    Note: Weights are discarded.
    Note: I am certain that this algorithm is rather inefficient -
    suggestions for improvements are welcome.
    '''
    CIJ = binarize(CIJ, copy=True)  # ensure CIJ is binary
    n = len(CIJ)
    k = np.sum(CIJ)  # total number of edges
    pths = []
    Pq = np.zeros((n, n, qmax))
    util = np.zeros((n, qmax))

    # this code is for pathlength=1; paths are seeded from sources
    q = 1
    for j in range(n):
        for i in range(len(sources)):
            i_s = sources[i]
            if CIJ[i_s, j] == 1:
                pths.append([i_s, j])
    pths = np.array(pths)

    # calculate the use index per vertex (for paths of length 1)
    # NOTE(review): counts go to column q (== 1); the MATLAB original uses
    # 1-based columns -- verify the intended offset
    util[:, q], _ = np.histogram(pths, bins=n)

    # now enter the found paths of length 1 into the pathmatrix Pq
    for nrp in range(np.size(pths, axis=0)):
        Pq[pths[nrp, 0], pths[nrp, q], q - 1] += 1

    # begin saving allpths
    if savepths:
        allpths = pths.copy()
    else:
        allpths = []

    npthscnt = k

    # big loop for all other pathlengths q
    for q in range(2, qmax + 1):
        # to keep track of time...
        # bugfix: the original format string had 'i' instead of '%i'
        # conversion specifiers, so the '%' operator raised
        # "TypeError: not all arguments converted" on every iteration
        print('current pathlength (q=%i, number of paths so far (up to q-1)=%i'
              % (q, np.sum(Pq)))

        # old paths are now in 'pths'; new paths are about to be collected
        # in 'npths'; estimate needed allocation for new paths
        len_npths = np.min((np.ceil(1.1 * npthscnt * k / n), 100000000))
        npths = np.zeros((q + 1, len_npths))

        # find the unique set of endpoints of 'pths'
        endp = np.unique(pths[:, q - 1])
        npthscnt = 0

        for i in endp:  # set of endpoints of previous paths
            # in 'pb' collect all previous paths with 'i' as their endpoint
            pb, = np.where(pths[:, q - 1] == i)
            # find the outgoing connections from i (breadth-first)
            nendp, = np.where(CIJ[i, :] == 1)
            # if i is not a dead end
            if nendp.size:
                for j in nendp:  # endpoints of next edge
                    # find new paths -- only legal ones, no vertex twice
                    # visited
                    pb_temp = pb[np.sum(j == pths[pb, 1:q], axis=1) == 0]
                    # add new paths to 'npths'
                    # NOTE(review): the 'pb_temp - 1' offset below looks like
                    # a leftover from 1-based MATLAB indexing -- verify
                    # against the MATLAB BCT original
                    pbx = pths[pb_temp - 1, :]
                    npx = np.ones((len(pb_temp), 1)) * j
                    npths[:, npthscnt:npthscnt + len(pb_temp)] = np.append(
                        pbx, npx, axis=1).T
                    npthscnt += len(pb_temp)
                    # count new paths and add the number to P
                    Pq[:n, j, q -
                        1] += np.histogram(pths[pb_temp - 1, 0], bins=n)[0]

        # note: 'npths' now contains a list of all the paths of length q
        if len_npths > npthscnt:
            npths = npths[:, :npthscnt]

        # append the matrix of all paths
        # FIXME
        if savepths:
            raise NotImplementedError("Sorry allpaths is not yet implemented")

        # calculate the use index per vertex (correct for cycles, count
        # source/target only once)
        util[:, q - 1] += (np.histogram(npths[:, :npthscnt], bins=n)[0] -
                           np.diag(Pq[:, :, q - 1]))

        # elininate cycles from "making it" to the next level, so that "pths"
        # contains all the paths that have a chance of being continued
        if npths.size:
            pths = np.squeeze(npths[:, np.where(npths[0, :] != npths[q, :])]).T
        else:
            # bugfix: previously a plain list [] was assigned here, which
            # made the 'pths.size' check below raise AttributeError
            pths = np.array([])

        # if there are no 'pths' paths left, end the search
        if not pths.size:
            qstop = q
            tpath = np.sum(Pq)
            plq = np.sum(np.sum(Pq, axis=0), axis=0)
            # bugfix: previously a bare 'return' yielded None here instead
            # of the documented 6-tuple
            return Pq, tpath, plq, qstop, allpths, util

    qstop = q
    tpath = np.sum(Pq)  # total number of paths
    plq = np.sum(np.sum(Pq, axis=0), axis=0)  # path length distribution

    return Pq, tpath, plq, qstop, allpths, util
def findwalks(CIJ):
    '''
    Walks are sequences of linked nodes, that may visit a single node more
    than once. This function finds the number of walks of a given length,
    between any two nodes.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed/undirected connection matrix

    Returns
    -------
    Wq : NxNxQ np.ndarray
        Wq[i,j,q] is the number of walks from i to j of length q
    twalk : int
        total number of walks found
    wlq : Qx1 np.ndarray
        walk length distribution as a function of q

    Notes
    -----
    Wq grows very quickly for larger N,K,q. Weights are discarded.
    '''
    CIJ = binarize(CIJ, copy=True)
    n = len(CIJ)
    Wq = np.zeros((n, n, n))
    CIJpwr = CIJ.copy()

    # walks of length 1 are exactly the (binarized) adjacency matrix
    Wq[:, :, 1] = CIJ

    # the number of walks of length q is the q-th matrix power of CIJ.
    # BUGFIX: the previous loop started at q=0, which clobbered Wq[:,:,0]
    # and Wq[:,:,1] with CIJ^2 and CIJ^3 and shifted every slice by two
    # powers; start at q=2 to match the reference MATLAB implementation
    # (for q=2:N), so that slice q holds walks of length q.
    for q in range(2, n):
        CIJpwr = np.dot(CIJpwr, CIJ)
        Wq[:, :, q] = CIJpwr

    twalk = np.sum(Wq)  # total number of walks
    wlq = np.sum(np.sum(Wq, axis=0), axis=0)  # walk length distribution
    return Wq, twalk, wlq
def reachdist(CIJ, ensure_binary=True):
    '''
    The binary reachability matrix describes reachability between all pairs
    of nodes. An entry (u,v)=1 means that there exists a path from node u
    to node v; alternatively (u,v)=0.
    The distance matrix contains lengths of shortest paths between all
    pairs of nodes. An entry (u,v) represents the length of shortest path
    from node u to node v. The average shortest path length is the
    characteristic path length of the network.
    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed/undirected connection matrix
    ensure_binary : bool
        Binarizes input. Defaults to true. No user who is not testing
        something will ever want to not use this, use distance_wei instead for
        unweighted matrices.
    Returns
    -------
    R : NxN np.ndarray
        binary reachability matrix
    D : NxN np.ndarray
        distance matrix
    Notes
    -----
    faster but more memory intensive than "breadthdist.m".
    '''
    def reachdist2(CIJ, CIJpwr, R, D, n, powr, col, row):
        # Multiply in one more power of CIJ; nonzero entries of CIJ**powr
        # mark node pairs connected by a walk of length 'powr'.
        CIJpwr = np.dot(CIJpwr, CIJ)
        R = np.logical_or(R, CIJpwr != 0)
        # D accumulates, per pair, the number of powers for which the pair
        # was already reachable; it is 'inverted' into a distance below.
        D += R
        # Recurse until every checked pair is reachable or the walk length
        # exceeds n (remaining pairs are then unreachable).
        if powr <= n and np.any(R[np.ix_(row, col)] == 0):
            powr += 1
            R, D, powr = reachdist2(CIJ, CIJpwr, R, D, n, powr, col, row)
        return R, D, powr
    if ensure_binary:
        CIJ = binarize(CIJ)
    R = CIJ.copy()
    D = CIJ.copy()
    powr = 2
    n = len(CIJ)
    CIJpwr = CIJ.copy()
    # check for vertices that have no incoming or outgoing connections
    # these are ignored by reachdist
    id = np.sum(CIJ, axis=0)  # indegree per node (note: shadows builtin id())
    od = np.sum(CIJ, axis=1)  # outdegree per node
    id0, = np.where(id == 0)  # nothing goes in, so column(R) will be 0
    od0, = np.where(od == 0)  # nothing comes out, so row(R) will be 0
    # use these columns and rows to check for reachability
    col = list(range(n))
    col = np.delete(col, id0)
    row = list(range(n))
    row = np.delete(row, od0)
    R, D, powr = reachdist2(CIJ, CIJpwr, R, D, n, powr, col, row)
    # 'invert' CIJdist to get distances
    D = powr - D + 1
    # put inf if no path found
    D[D == n + 2] = np.inf
    D[:, id0] = np.inf
    D[od0, :] = np.inf
    return R, D
def search_information(adjacency, transform=None, has_memory=False):
    """
    Calculates search information of `adjacency`
    Computes the amount of information (measured in bits) that a random walker
    needs to follow the shortest path between a given pair of nodes.
    Parameters
    ----------
    adjacency : (N x N) array_like
        Weighted/unweighted, direct/undirected connection weight/length array
    transform : str, optional
        If `adjacency` is a connection weight array, specify a transform to map
        input connection weights to connection lengths. Options include ['log',
        'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
        Default: None
    has_memory : bool, optional
        This flag defines whether or not the random walker "remembers" its
        previous step, which has the effect of reducing the amount of
        information needed to find the next state. Default: False
    Returns
    -------
    SI : (N x N) ndarray
        Pair-wise search information array. Note that `SI[i,j]` may be
        different from `SI[j,i]``; hence, `SI` is not a symmetric matrix even
        when `adjacency` is symmetric.
    References
    ----------
    .. [1] Goni, J., van den Heuvel, M. P., Avena-Koenigsberger, A., de
       Mendizabal, N. V., Betzel, R. F., Griffa, A., Hagmann, P.,
       Corominas-Murtra, B., Thiran, J-P., & Sporns, O. (2014). Resting-brain
       functional connectivity predicted by analytic measures of network
       communication. Proceedings of the National Academy of Sciences, 111(2),
       833-838.
    .. [2] Rosvall, M., Trusina, A., Minnhagen, P., & Sneppen, K. (2005).
       Networks and cities: An information perspective. Physical Review
       Letters, 94(2), 028701.
    """
    N = len(adjacency)
    # For symmetric inputs only the upper triangle is traversed; the
    # backward walk along the same path supplies SI[j, i].
    if np.allclose(adjacency, adjacency.T):
        flag_triu = True
    else:
        flag_triu = False
    # Row-normalize the adjacency matrix into random-walk transition
    # probabilities T (solves diag(rowsum) @ T = adjacency).
    T = np.linalg.solve(np.diag(np.sum(adjacency, axis=1)), adjacency)
    # Hop counts and predecessor matrix for all-pairs shortest paths.
    _, hops, Pmat = distance_wei_floyd(adjacency, transform)
    SI = np.zeros((N, N))
    SI[np.eye(N) > 0] = np.nan  # search information to self is undefined
    for i in range(N):
        for j in range(N):
            if (j > i and flag_triu) or (not flag_triu and i != j):
                path = retrieve_shortest_path(i, j, hops, Pmat)
                lp = len(path) - 1  # number of edges on the shortest path
                if flag_triu:
                    if np.any(path):
                        pr_step_ff = np.zeros(lp)  # forward step probabilities
                        pr_step_bk = np.zeros(lp)  # backward step probabilities
                        if has_memory:
                            # With memory the walker does not retrace its last
                            # step: each transition probability is renormalized
                            # by excluding the probability of the reverse step.
                            pr_step_ff[0] = T[path[0], path[1]]
                            pr_step_bk[lp-1] = T[path[lp], path[lp-1]]
                            for z in range(1, lp):
                                pr_step_ff[z] = T[path[z], path[z+1]] / (1 - T[path[z-1], path[z]])
                                pr_step_bk[lp-z-1] = T[path[lp-z], path[lp-z-1]] / (1 - T[path[lp-z+1], path[lp-z]])
                        else:
                            for z in range(lp):
                                pr_step_ff[z] = T[path[z], path[z+1]]
                                pr_step_bk[z] = T[path[z+1], path[z]]
                        # Probability of following the whole path; its
                        # information content in bits is -log2(prob).
                        prob_sp_ff = np.prod(pr_step_ff)
                        prob_sp_bk = np.prod(pr_step_bk)
                        SI[i, j] = -np.log2(prob_sp_ff)
                        SI[j, i] = -np.log2(prob_sp_bk)
                else:
                    if np.any(path):
                        pr_step_ff = np.zeros(lp)
                        if has_memory:
                            pr_step_ff[0] = T[path[0], path[1]]
                            for z in range(1, lp):
                                pr_step_ff[z] = T[path[z], path[z+1]] / (1 - T[path[z-1], path[z]])
                        else:
                            for z in range(lp):
                                pr_step_ff[z] = T[path[z], path[z+1]]
                        prob_sp_ff = np.prod(pr_step_ff)
                        SI[i, j] = -np.log2(prob_sp_ff)
                    else:
                        # no path exists from i to j
                        SI[i, j] = np.inf
    return SI
def mean_first_passage_time(adjacency):
    """
    Calculates mean first passage time of `adjacency`

    The first passage time from i to j is the expected number of steps it takes
    a random walker starting at node i to arrive for the first time at node j.
    The mean first passage time is not a symmetric measure: `mfpt(i,j)` may be
    different from `mfpt(j,i)`.

    Parameters
    ----------
    adjacency : (N x N) array_like
        Weighted/unweighted, direct/undirected connection weight/length array

    Returns
    -------
    MFPT : (N x N) ndarray
        Pairwise mean first passage time array

    References
    ----------
    .. [1] Goni, J., Avena-Koenigsberger, A., de Mendizabal, N. V., van den
       Heuvel, M. P., Betzel, R. F., & Sporns, O. (2013). Exploring the
       morphospace of communication efficiency in complex networks. PLoS One,
       8(3), e58070.
    """
    # Row-normalize the adjacency matrix into the random-walk transition
    # probability matrix P (solves diag(rowsum) @ P = adjacency).
    P = np.linalg.solve(np.diag(np.sum(adjacency, axis=1)), adjacency)
    n = len(P)
    D, V = np.linalg.eig(P.T)

    # The stationary distribution is the left eigenvector of P belonging to
    # eigenvalue 1.  BUGFIX: use argmin to obtain a single scalar index --
    # the previous np.where(aux == aux.min()) lookup returned an array that
    # could hold several indices on ties, silently producing a multi-column
    # eigenvector and wrong downstream shapes.
    aux = np.abs(D - 1)
    index = np.argmin(aux)
    if aux[index] > 10e-3:
        # BUGFIX: the old message reported aux[index]+1 (distance from 1
        # plus one) as the "minimum eigenvalue"; report the actual closest
        # eigenvalue instead.
        raise ValueError("Cannot find eigenvalue of 1. Closest eigenvalue "
                         "is {0}. Tolerance was set at 10e-3.".format(D[index]))

    # Normalize the eigenvector into the stationary distribution w.
    w = np.real(V[:, index])
    w = w / np.sum(w)
    W = np.repeat(np.atleast_2d(w), n, 0)  # every row of W is w

    # Fundamental matrix of the Markov chain.
    I = np.eye(n)
    Z = np.linalg.inv(I - P + W)

    # Standard closed form: mfpt[i, j] = (Z[j, j] - Z[i, j]) / w[j].
    mfpt = (np.repeat(np.atleast_2d(np.diag(Z)), n, 0) - Z) / W
    return mfpt
| clbarnes/bctpy | bct/algorithms/distance.py | Python | gpl-3.0 | 35,085 | [
"VisIt"
] | 01b7cd971e580f012e51a3b19c79765797316108d5930a7eaa51c9d7b15326ed |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common constants and utility functions."""
# Input tf.Example field keys.
TFE_KEY_IMAGE_HEIGHT = 'image/height'
TFE_KEY_IMAGE_WIDTH = 'image/width'
TFE_KEY_PREFIX_KEYPOINT_2D = 'image/object/part/'
TFE_KEY_SUFFIX_KEYPOINT_2D = ['/center/y', '/center/x']
TFE_KEY_PREFIX_KEYPOINT_3D = 'image/object/part_3d/'
TFE_KEY_SUFFIX_KEYPOINT_3D = ['/center/y', '/center/x', '/center/z']
TFE_KEY_SUFFIX_KEYPOINT_SCORE = '/score'
TFE_KEY_FEATURE = 'feature/data'
TFE_KEY_CLASS_LABEL_ID = 'image/class/label'
TFE_KEY_CLASS_LABEL_CONFIDENCE = 'image/class/confidence'
# Input tf.SequenceExample context feature field keys.
TFSE_KEY_IMAGE_HEIGHT = 'image/height'
TFSE_KEY_IMAGE_WIDTH = 'image/width'
TFSE_KEY_SUFFIX_KEYPOINT_2D = ['/region/point/y', '/region/point/x']
TFSE_KEY_SUFFIX_KEYPOINT_2D_SCORE = '/region/point/score'
TFSE_KEY_SUFFIX_KEYPOINT_3D = [
    '/region/3d_point/y', '/region/3d_point/x', '/region/3d_point/z'
]
TFSE_KEY_SUFFIX_KEYPOINT_3D_SCORE = '/region/3d_point/score'
# Input keys (used to index tensors in input/output dictionaries).
KEY_IMAGE_SIZES = 'image_sizes'
KEY_KEYPOINTS_2D = 'keypoints_2d'
KEY_KEYPOINT_SCORES_2D = 'keypoint_scores_2d'
KEY_KEYPOINT_MASKS_2D = 'keypoint_masks_2d'
KEY_PREPROCESSED_KEYPOINTS_2D = 'preprocessed_keypoints_2d'
KEY_PREPROCESSED_KEYPOINT_MASKS_2D = 'preprocessed_keypoint_masks_2d'
KEY_OFFSET_POINTS_2D = 'offset_points_2d'
KEY_SCALE_DISTANCES_2D = 'scale_distances_2d'
KEY_KEYPOINTS_3D = 'keypoints_3d'
KEY_PREPROCESSED_KEYPOINTS_3D = 'preprocessed_keypoints_3d'
KEY_OFFSET_POINTS_3D = 'offset_points_3d'
KEY_SCALE_DISTANCES_3D = 'scale_distances_3d'
KEY_EMBEDDING_MEANS = 'unnormalized_embeddings'
KEY_EMBEDDING_STDDEVS = 'embedding_stddevs'
KEY_EMBEDDING_SAMPLES = 'unnormalized_embedding_samples'
KEY_PREDICTED_KEYPOINTS_3D = 'predicted_keypoints_3d'
KEY_ALL_PREDICTED_KEYPOINTS_3D = 'all_predicted_keypoints_3d'
KEY_FEATURES = 'features'
KEY_CLASS_TARGETS = 'class_targets'
KEY_CLASS_WEIGHTS = 'class_weights'
# Model input keypoint types.
# 2D keypoints from input tables.
MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT = '2D_INPUT'
# 2D projections of 3D keypoints.
MODEL_INPUT_KEYPOINT_TYPE_3D_PROJECTION = '3D_PROJECTION'
# Both 2D keypoints from input tables and 2D projections of 3D keypoints.
MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT_AND_3D_PROJECTION = (
    '2D_INPUT_AND_3D_PROJECTION')
# Supported model input keypoint types for training.
SUPPORTED_TRAINING_MODEL_INPUT_KEYPOINT_TYPES = [
    MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
    MODEL_INPUT_KEYPOINT_TYPE_3D_PROJECTION,
    MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT_AND_3D_PROJECTION,
]
# Supported model input keypoint types for inference.
SUPPORTED_INFERENCE_MODEL_INPUT_KEYPOINT_TYPES = [
    MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
]
# Model input keypoint mask types.
# No use.
MODEL_INPUT_KEYPOINT_MASK_TYPE_NO_USE = 'NO_USE'
# Masks 2D keypoint coordinates (to 0).
MODEL_INPUT_KEYPOINT_MASK_TYPE_MASK_KEYPOINTS = 'MASK_KEYPOINTS'
# Concatenates with 2D keypoint coordinates as input.
MODEL_INPUT_KEYPOINT_MASK_TYPE_AS_INPUT = 'AS_INPUT'
# Masks and concatenates with 2D keypoint coordinates as input.
MODEL_INPUT_KEYPOINT_MASK_TYPE_MASK_KEYPOINTS_AND_AS_INPUT = (
    'MASK_KEYPOINTS_AND_AS_INPUT')
# Supported model input keypoint mask types.
SUPPORTED_MODEL_INPUT_KEYPOINT_MASK_TYPES = [
    MODEL_INPUT_KEYPOINT_MASK_TYPE_NO_USE,
    MODEL_INPUT_KEYPOINT_MASK_TYPE_MASK_KEYPOINTS,
    MODEL_INPUT_KEYPOINT_MASK_TYPE_AS_INPUT,
    MODEL_INPUT_KEYPOINT_MASK_TYPE_MASK_KEYPOINTS_AND_AS_INPUT,
]
# Base model types.
# Simple Baseline architecture: Martinez, et al. A simple yet effective
# baseline for 3d human pose estimation. ICCV 2017.
BASE_MODEL_TYPE_SIMPLE = 'SIMPLE'
# Temporal Simple Baseline.
BASE_MODEL_TYPE_TEMPORAL_SIMPLE = 'TEMPORAL_SIMPLE'
# Temporal Simple Baseline late fusion.
BASE_MODEL_TYPE_TEMPORAL_SIMPLE_LATE_FUSE = 'TEMPORAL_SIMPLE_LATE_FUSE'
# Supported base model types.
SUPPORTED_BASE_MODEL_TYPES = [
    BASE_MODEL_TYPE_SIMPLE,
    BASE_MODEL_TYPE_TEMPORAL_SIMPLE,
    BASE_MODEL_TYPE_TEMPORAL_SIMPLE_LATE_FUSE,
]
# Embedding types.
# Point embedding.
EMBEDDING_TYPE_POINT = 'POINT'
# Gaussian embedding with diagonal covariance matrix.
EMBEDDING_TYPE_GAUSSIAN = 'GAUSSIAN'
# Gaussian embedding with scalar variance.
EMBEDDING_TYPE_GAUSSIAN_SCALAR_VAR = 'GAUSSIAN_SCALAR_VAR'
# Supported embedding types.
SUPPORTED_EMBEDDING_TYPES = [
    EMBEDDING_TYPE_POINT,
    EMBEDDING_TYPE_GAUSSIAN,
    EMBEDDING_TYPE_GAUSSIAN_SCALAR_VAR,
]
# Embedding distance types.
# Distance computed using embedding centers.
DISTANCE_TYPE_CENTER = 'CENTER'
# Distance computed using embedding samples.
DISTANCE_TYPE_SAMPLE = 'SAMPLE'
# Distance computed using both embedding centers and samples.
DISTANCE_TYPE_CENTER_AND_SAMPLE = 'CENTER_AND_SAMPLE'
# Supported distance types.
SUPPORTED_DISTANCE_TYPES = [
    DISTANCE_TYPE_CENTER,
    DISTANCE_TYPE_SAMPLE,
    DISTANCE_TYPE_CENTER_AND_SAMPLE,
]
# Embedding distance pair types.
# Reduces distances between all pairs between two lists of samples.
DISTANCE_PAIR_TYPE_ALL_PAIRS = 'ALL_PAIRS'
# Reduces distances only between corresponding pairs between two lists of
# samples.
DISTANCE_PAIR_TYPE_CORRESPONDING_PAIRS = 'CORRESPONDING_PAIRS'
# Supported distance pair types.
SUPPORTED_DISTANCE_PAIR_TYPES = [
    DISTANCE_PAIR_TYPE_ALL_PAIRS,
    DISTANCE_PAIR_TYPE_CORRESPONDING_PAIRS,
]
# Embedding distance kernels.
# Squared L2 distance.
DISTANCE_KERNEL_SQUARED_L2 = 'SQUARED_L2'
# L2-based sigmoid matching probability.
DISTANCE_KERNEL_L2_SIGMOID_MATCHING_PROB = 'L2_SIGMOID_MATCHING_PROB'
# Squared L2-based sigmoid matching probability.
DISTANCE_KERNEL_SQUARED_L2_SIGMOID_MATCHING_PROB = (
    'SQUARED_L2_SIGMOID_MATCHING_PROB')
# Expected likelihood.
DISTANCE_KERNEL_EXPECTED_LIKELIHOOD = 'EXPECTED_LIKELIHOOD'
# Supported distance kernels.
SUPPORTED_DISTANCE_KERNELS = [
    DISTANCE_KERNEL_SQUARED_L2,
    DISTANCE_KERNEL_L2_SIGMOID_MATCHING_PROB,
    DISTANCE_KERNEL_SQUARED_L2_SIGMOID_MATCHING_PROB,
    DISTANCE_KERNEL_EXPECTED_LIKELIHOOD,
]
# Embedding distance reductions.
# Mean of all distances.
DISTANCE_REDUCTION_MEAN = 'MEAN'
# Mean of distances not larger than the median of all distances.
DISTANCE_REDUCTION_LOWER_HALF_MEAN = 'LOWER_HALF_MEAN'
# Negative logarithm of the mean of all distances.
DISTANCE_REDUCTION_NEG_LOG_MEAN = 'NEG_LOG_MEAN'
# Negative logarithm of mean of distances no larger than the distance median.
DISTANCE_REDUCTION_LOWER_HALF_NEG_LOG_MEAN = 'LOWER_HALF_NEG_LOG_MEAN'
# One minus the mean of all distances.
DISTANCE_REDUCTION_ONE_MINUS_MEAN = 'ONE_MINUS_MEAN'
# Supported embedding distance reductions.
SUPPORTED_PAIRWISE_DISTANCE_REDUCTIONS = [
    DISTANCE_REDUCTION_MEAN,
    DISTANCE_REDUCTION_LOWER_HALF_MEAN,
    DISTANCE_REDUCTION_NEG_LOG_MEAN,
    DISTANCE_REDUCTION_LOWER_HALF_NEG_LOG_MEAN,
    DISTANCE_REDUCTION_ONE_MINUS_MEAN,
]
SUPPORTED_COMPONENTWISE_DISTANCE_REDUCTIONS = [DISTANCE_REDUCTION_MEAN]
# 3D keypoint distance measurement type.
# Normalized/Procrustes-aligned MPJPE (Mean Per-Joint Position Error).
KEYPOINT_DISTANCE_TYPE_MPJPE = 'MPJPE'
# Supported 3D keypoint distance measurement type.
SUPPORTED_KEYPOINT_DISTANCE_TYPES = [KEYPOINT_DISTANCE_TYPE_MPJPE]
# Activation function names.
ACTIVATION_FN_NONE = 'NONE'
ACTIVATION_FN_RELU = 'RELU'
def validate(value, supported_values):
  """Validates if value is supported.

  Args:
    value: A Python variable.
    supported_values: A list of supported variable values.

  Raises:
    ValueError: If `value` is not in `supported_values`.
  """
  if value not in supported_values:
    # BUGFIX: the previous message interpolated `value.name`, which raises
    # AttributeError for plain values (every constant in this module is a
    # str), so the intended ValueError was never raised.  Report the value
    # and the supported set instead.
    raise ValueError(
        'Unsupported value: `%s`. Supported values: %s.' %
        (value, supported_values))
| google-research/google-research | poem/core/common.py | Python | apache-2.0 | 8,170 | [
"Gaussian"
] | f9a76acb1a959496aff1530e50c9593ab06a60b21a9d6e2e8c8c70311b98c448 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
    '''
    Look up *colorName* in VTK's named-color table and return its
    red, green and blue components as a list of three doubles.
    '''
    components = [0.0] * 3  # default: black
    named_colors = vtk.vtkNamedColors()
    named_colors.GetColorRGB(colorName, components)  # fills the list in place
    return components
# Now create the RenderWindow, Renderer and Interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Read the cow model from the VTK data directory.
cowReader = vtk.vtkOBJReader()
cowReader.SetFileName(VTK_DATA_ROOT + "/Data/Viewpoint/cow.obj")
# Clip the model with the plane x = 0 (normal along +x), keeping one half.
plane = vtk.vtkPlane()
plane.SetNormal(1, 0, 0)
cowClipper = vtk.vtkClipPolyData()
cowClipper.SetInputConnection(cowReader.GetOutputPort())
cowClipper.SetClipFunction(plane)
# Generate both point and cell normals for the clipped half.
cellNormals = vtk.vtkPolyDataNormals()
cellNormals.SetInputConnection(cowClipper.GetOutputPort())
cellNormals.ComputePointNormalsOn()
cellNormals.ComputeCellNormalsOn()
# Mirror the clipped half across the x = 0 plane via a scale of -1 in x.
reflect = vtk.vtkTransform()
reflect.Scale(-1, 1, 1)
cowReflect = vtk.vtkTransformPolyDataFilter()
cowReflect.SetTransform(reflect)
cowReflect.SetInputConnection(cellNormals.GetOutputPort())
# The mirroring flips orientation, so reverse the normals (but not the
# cell ordering) of the reflected copy.
cowReverse = vtk.vtkReverseSense()
cowReverse.SetInputConnection(cowReflect.GetOutputPort())
cowReverse.ReverseNormalsOn()
cowReverse.ReverseCellsOff()
# Actor for the reflected half, with front-face culling enabled.
reflectedMapper = vtk.vtkPolyDataMapper()
reflectedMapper.SetInputConnection(cowReverse.GetOutputPort())
reflected = vtk.vtkActor()
reflected.SetMapper(reflectedMapper)
reflected.GetProperty().SetDiffuseColor(GetRGBColor('flesh'))
reflected.GetProperty().SetDiffuse(.8)
reflected.GetProperty().SetSpecular(.5)
reflected.GetProperty().SetSpecularPower(30)
reflected.GetProperty().FrontfaceCullingOn()
ren1.AddActor(reflected)
# Actor for the original (unreflected) clipped half.
cowMapper = vtk.vtkPolyDataMapper()
cowMapper.SetInputConnection(cowClipper.GetOutputPort())
cow = vtk.vtkActor()
cow.SetMapper(cowMapper)
ren1.AddActor(cow)
# Camera and window setup.
ren1.SetBackground(.1, .2, .4)
renWin.SetSize(320, 240)
ren1.ResetCamera()
ren1.GetActiveCamera().SetViewUp(0, 1, 0)
ren1.GetActiveCamera().Azimuth(180)
ren1.GetActiveCamera().Dolly(1.75)
ren1.ResetCameraClippingRange()
iren.Initialize()
# render the image
#iren.Start()
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Core/Testing/Python/reverseNormals.py | Python | gpl-3.0 | 2,227 | [
"VTK"
] | 0d4a9710817e6ce66359cad2482d6fd67d41f84c81e8b36572da2bd6a7110040 |
from keys import *
from simulation_params import *
import nest
import numpy.random as random
# Neuron parameters
hh_neuronparams = {'E_L': -70., # Resting membrane potential in mV
                   'V_T': -63., # Voltage offset that controls dynamics.
                                # -63mV results in a threshold around -50mV.
                   'C_m': 2., # Capacity of the membrane in pF
                   't_ref': 2., # Duration of refractory period (V_m = V_reset) in ms
                   'tau_syn_ex': 5., # Time constant of postsynaptic excitatory currents in ms
                   'tau_syn_in': 10. # Time constant of postsynaptic inhibitory currents in ms
                   }
# Synapse common parameters
# NOTE(review): random.normal()/random.uniform() below are evaluated ONCE at
# module import, so every synapse built from these dicts shares the single
# drawn value; if per-connection randomness was intended, a NEST random
# parameter distribution would be needed -- confirm intent.
STDP_synapseparams = {
    'alpha': random.normal(0.5, 5.0), # Asymmetry parameter (scales depressing increments as alpha*lambda)
    'lambda': 0.5 # Step size
}
# Glutamate synapse
STDP_synparams_Glu = dict({'delay': random.uniform(low=1.0, high=1.3), # Distribution of delay values for connections
                           'weight': w_Glu, # Weight (power) of synapse
                           'Wmax': 20.}, **STDP_synapseparams) # Maximum allowed weight
# GABA synapse
STDP_synparams_GABA = dict({'delay': random.uniform(low=1.0, high=1.3),
                            'weight': w_GABA,
                            'Wmax': -20.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': random.uniform(low=1.0, high=1.3),
                           'weight': w_ACh,
                           'Wmax': 20.}, **STDP_synapseparams)
# Noradrenaline excitatory synapse
NORA_synparams_ex = dict({'delay': 1.,
                          'weight': w_NA_ex,
                          'Wmax': 100.})
# Noradrenaline inhibitory synapse
NORA_synparams_in = dict({'delay': 1.,
                          'weight': w_NA_in,
                          'Wmax': -100.})
# Dopamine excitatory synapse
DOPA_synparams_ex = dict({'delay': 1.,
                          'weight': w_DA_ex,
                          'Wmax': 100.})
# Dopamine inhibitory synapse
DOPA_synparams_in = dict({'delay': 1.,
                          'weight': w_DA_in,
                          'Wmax': -100.})
# Serotonin excitatory synapse
SERO_synparams_ex = dict({'delay': 1.,
                          'weight': w_SERO_ex,
                          'Wmax': 100.})
# Serotonin inhibitory synapse
SERO_synparams_in = dict({'delay': 1.,
                          'weight': w_SERO_in,
                          'Wmax': -100.})
# Dictionary of synapses: maps each neurotransmitter key to a tuple of
# (synapse model name, connection weight), both imported via star-imports.
synapses = {GABA: (gaba_synapse, w_GABA ),
            Glu: (glu_synapse, w_Glu ),
            ACh: (ach_synapse, w_ACh ),
            NA_ex: (nora_synapse_ex, w_NA_ex),
            NA_in: (nora_synapse_in, w_NA_in),
            DA_ex: (dopa_synapse_ex, w_DA_ex),
            DA_in: (dopa_synapse_in, w_DA_in),
            SERO_ex: (sero_synapse_ex, w_SERO_ex),
            SERO_in: (sero_synapse_in, w_SERO_in),
            }
# Parameters for generator (static connection from the poisson generator)
static_syn = {
    'weight': w_Glu * 5,
    'delay': pg_delay
}
# Device parameters
multimeter_param = {'to_memory': True,
                    'to_file': False,
                    'withtime': True,
                    'interval': 0.1, # sampling interval in ms
                    'record_from': ['V_m'], # record membrane potential only
                    'withgid': True}
detector_param = {'label': 'spikes',
                  'withtime': True,
                  'withgid': True,
                  'to_file': False,
                  'to_memory': True,
                  'scientific': True}
| research-team/NEUCOGAR | NEST/cube/integration/excitement/synapses.py | Python | gpl-2.0 | 3,707 | [
"NEURON"
] | 21ad5a851e5f4004d56c610b8f4b9e1399d4b16276d3416e93f17b9228ced611 |
#!/bin/env python
#
# Copyright 2013-2014 Graham McVicker and Bryce van de Geijn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
"""
usage: import_bam_ref_nonref_counts.py [-h] [--assembly ASSEMBLY]
[--snp_index_track SNP_INDEX_TRACK]
[--snp_track SNP_TRACK]
[--data_type {uint8,uint16}]
[--hap_track HAP_TRACK]
[--samples_file SAMPLES_FILE]
[--individual INDIVIDUAL]
[--population POPULATION]
ref_as_count_track alt_as_count_track
other_as_count_track read_count_track
bam_filenames [bam_filenames ...]
positional arguments:
ref_as_count_track name of track to store counts of reads that match
reference
alt_as_count_track name of track to store counts of reads that match
alternate
other_as_count_track name of track to store counts of reads that match
neither reference or alternate
read_count_track name of track to store read counts in--positions of
LEFT end of read are used
bam_filenames BAM file(s) to read mapped reads from. BAMs must be
sorted and indexed.
optional arguments:
-h, --help show this help message and exit
--assembly ASSEMBLY genome assembly that reads were mapped to (e.g. hg19)
--snp_index_track SNP_INDEX_TRACK
name of SNP index track
(default=1000genomes/snp_index)
--snp_track SNP_TRACK
name of track containing table of SNPs
(default=1000genomes/snp_tab)
--data_type {uint8,uint16}
data type of stored counts; uint8 takes up less disk
space but has a maximum value of 255 (default=uint8)
--hap_track HAP_TRACK
name of haplotype track; if supplied when read
overlaps multiple SNPs counts are randomly assigned to
ONE of the overlapping HETEROZYGOUS SNPs; if not
supplied counts are randomly assigned to ONE of
overlapping SNPs (regardless of genotype)
--samples_file SAMPLES_FILE
path to file containing a list of individual
identifiers in the same order that the individuals
appear in the haplotype table. The file is assumed to
be in the same format as the sample file used by
IMPUTE2. This file contains 4 columns: 'sample'
'population' 'group' 'sex'. This must be provided if
hap_track is is specified. The sex column is not
currently used and can be omitted
--individual INDIVIDUAL
identifier for individual, used to determine which
SNPs are heterozygous (e.g. 18505). Must be provided
if hap_track is specified and must match one of the
individuals in the provided samples_file
--population POPULATION
indicate that haplotype table contains only
individuals from this population or group (e.g. YRI or
EUR)
This program reads BAM files and counts the number of reads that match
the alternate and reference allele at every SNP position stored in the
/impute2/snps HDF5 track. The read counts are stored in specified
ref_track, alt_track and other_track HDF5 tracks. Additionally counts
of all reads are stored in another track (at the left-most position of
the reads).
This program does not perform filtering of reads based on mappability.
It is assumed that this filtering will be done prior to calling this
script.
Reads that overlap known indels are not included in allele-specific
counts.
We are currently working on an improved allele-specific mapping method
so that criteria 1 and 2 can be relaxed and this filtering can be done
at the level of the BAM (before running this script).
This program requires the use of the genome library, which can be
downloaded from here:
https://github.com/gmcvicker/genome
"""
import sys
import os
import gzip
import tables
import argparse
import numpy as np
import pysam
import genome.db
import genome.coord
import genome.trackstat as trackstat
# codes used by pysam for aligned read CIGAR strings
BAM_CMATCH = 0     # M - alignment match (can be match or mismatch)
BAM_CINS = 1       # I - insertion to the reference
BAM_CDEL = 2       # D - deletion from the reference
BAM_CREF_SKIP = 3  # N - skipped region from the reference (e.g. intron)
BAM_CSOFT_CLIP = 4 # S - soft clipping
BAM_CHARD_CLIP = 5 # H - hard clipping
BAM_CPAD = 6       # P - padding
BAM_CEQUAL = 7     # = - sequence match
BAM_CDIFF = 8      # X - sequence mismatch
# maps the numeric CIGAR op codes above back to their single-letter names
BAM_CIGAR_DICT = {0 : "M",
                  1 : "I",
                  2 : "D",
                  3 : "N",
                  4 : "S",
                  5 : "H",
                  6 : "P",
                  7 : "=",
                  8 : "X"}
# value in the SNP index track marking genome positions with no SNP
SNP_UNDEF = -1
# saturation limits for counts stored in uint8 / uint16 HDF5 tracks
MAX_UINT8_COUNT = 255
MAX_UINT16_COUNT = 65535
def create_carray(track, chrom, data_type):
    """Create a new zlib-compressed, chromosome-length CArray (initialized
    to zeros) at the root of the track's HDF5 file and return it.

    data_type must be 'uint8' or 'uint16'; anything else raises
    NotImplementedError.
    """
    atom_classes = {"uint8": tables.UInt8Atom,
                    "uint16": tables.UInt16Atom}
    if data_type not in atom_classes:
        raise NotImplementedError("unsupported datatype %s" % data_type)
    atom = atom_classes[data_type](dflt=0)

    compression = tables.Filters(complevel=1, complib="zlib")

    # one array element per base of the chromosome
    return track.h5f.createCArray(track.h5f.root, chrom.name,
                                  atom, [chrom.length],
                                  filters=compression)
def get_carray(track, chrom):
    """Fetch the existing per-chromosome CArray node from the root of the
    track's HDF5 file."""
    node_path = "/%s" % chrom
    return track.h5f.getNode(node_path)
def is_indel(snp):
    """Return True if this SNP record is actually an indel (either allele
    longer than a single base), False otherwise.

    BUGFIX: the original returned True for indels but fell off the end
    (returning None) otherwise; callers relying on truthiness still worked,
    but the function now returns an explicit bool in both cases.
    """
    return (len(snp['allele1']) != 1) or (len(snp['allele2']) != 1)
def dump_read(f, read):
    """Write a human-readable description of an aligned read to the file
    handle f (debugging aid)."""
    cigar_str = " ".join("%s:%d" % (BAM_CIGAR_DICT[op], op_len)
                         for op, op_len in read.cigar)

    fields = ["pos: %d" % read.pos,
              "aend: %d" % read.aend,
              "alen (len of aligned portion of read on genome): %d" % read.alen,
              "qstart: %d" % read.qstart,
              "qend: %d" % read.qend,
              "qlen (len of aligned qry seq): %d" % read.qlen,
              "rlen (read len): %d" % read.rlen,
              "tlen (insert size): %d" % read.tlen,
              "cigar: %s" % cigar_str,
              "seq: %s" % read.seq]

    f.write("\n".join(fields) + "\n")
def get_sam_iter(samfile, chrom):
    """Return an iterator over reads mapped to this chromosome.

    If the chromosome name is not present in the BAM file, retry with the
    leading 'chr' stripped (e.g. 'chr2L' vs '2L'); if that also fails,
    return an empty iterator so the chromosome is skipped.
    """
    try:
        return samfile.fetch(reference=chrom.name,
                             start=1, end=chrom.length)
    except ValueError as ve:
        sys.stderr.write("%s\n" % str(ve))

    # could not find chromosome, try stripping leading 'chr'
    # E.g. for drosophila, sometimes 'chr2L' is used but
    # othertimes just '2L' is used. Annoying!
    chrom_name = chrom.name.replace("chr", "")
    sys.stderr.write("WARNING: %s does not exist in BAM file, "
                     "trying %s instead\n" % (chrom.name, chrom_name))

    try:
        return samfile.fetch(reference=chrom_name,
                             start=1, end=chrom.length)
    except ValueError:
        sys.stderr.write("WARNING: %s does not exist in BAM file, "
                         "skipping chromosome\n" % chrom_name)
        return iter([])
def choose_overlap_snp(read, snp_tab, snp_index_array, hap_tab, ind_idx):
    """Picks out a single SNP from those that the read overlaps.

    Returns a tuple containing 4 elements: [0] the index of the SNP in
    the SNP table, [1] the offset into the read sequence, [2] flag
    indicating whether the read was 'split' (i.e. was a spliced
    read), [3] flag indicating whether read overlaps known indel.

    If there are no overlapping SNPs or the read cannot be processed,
    (None, None, is_split, overlap_indel) is returned instead.
    """
    read_offsets = []
    snp_idx = []
    read_start_idx = 0
    genome_start_idx = read.pos
    n_match_segments = 0
    is_split = False
    overlap_indel = False

    # walk the CIGAR, tracking the current offset into both the read
    # sequence and the genome
    for cig in read.cigar:
        op = cig[0]
        op_len = cig[1]

        if op == BAM_CMATCH:
            # this is a block of match/mismatch in read alignment
            read_end = read_start_idx + op_len
            genome_end = genome_start_idx + op_len

            # get offsets of any SNPs that this read overlaps
            idx = snp_index_array[genome_start_idx:genome_end]
            is_def = np.where(idx != SNP_UNDEF)[0]
            read_offsets.extend(read_start_idx + is_def)
            snp_idx.extend(idx[is_def])

            read_start_idx = read_end
            genome_start_idx = genome_end
            n_match_segments += 1
        elif op == BAM_CREF_SKIP:
            # spliced read, skip over this region of genome
            genome_start_idx += op_len
            is_split = True
        else:
            # NOTE(review): other ops (insertions, deletions, clips) are
            # reported but neither offset is advanced, so subsequent SNP
            # offsets may be wrong for such reads; preserved from the
            # original implementation -- confirm upstream filtering.
            sys.stderr.write("skipping because contains CIGAR code %s "
                             " which is not currently implemented" % BAM_CIGAR_DICT[op])

    # reads that overlap known indels are discarded entirely, since
    # allele-specific counts are unreliable there
    for i in snp_idx:
        if is_indel(snp_tab[i]):
            overlap_indel = True
            return (None, None, is_split, overlap_indel)

    n_overlap_snps = len(read_offsets)
    if n_overlap_snps == 0:
        # no SNPs overlap this read
        return (None, None, is_split, overlap_indel)

    if hap_tab:
        # genotype info is provided by haplotype table
        # BUGFIX: validate the individual's column range BEFORE slicing.
        # The old code sliced first and checked afterwards; an out-of-range
        # numpy slice silently returns a short array, so haps[0] raised
        # IndexError and the intended ValueError was never reached.
        if ind_idx * 2 + 2 > hap_tab.shape[1]:
            raise ValueError("index of individual (%d) is >= number of "
                             "individuals in haplotype_tab (%d). probably "
                             "need to specify --population or use a different "
                             "--samples_tab" % (ind_idx, hap_tab.shape[1]/2))

        # pull out subset of overlapping SNPs that are heterozygous
        # in this individual
        het_read_offsets = []
        het_snp_idx = []
        for (i, read_offset) in zip(snp_idx, read_offsets):
            haps = hap_tab[i, (ind_idx*2):(ind_idx*2 + 2)]
            if haps[0] != haps[1]:
                # this is a het
                het_read_offsets.append(read_offset)
                het_snp_idx.append(i)

        n_overlap_hets = len(het_read_offsets)

        if n_overlap_hets == 0:
            # none of the overlapping SNPs are hets
            return (None, None, is_split, overlap_indel)

        if n_overlap_hets == 1:
            # only one overlapping SNP is a het
            return (het_snp_idx[0], het_read_offsets[0], is_split,
                    overlap_indel)

        # choose ONE overlapping HETEROZYGOUS SNP uniformly at random so the
        # same read is not counted multiple times.
        # BUGFIX: np.random.randint's upper bound is EXCLUSIVE, so the old
        # randint(0, n-1) sampled only 0..n-2 and could never select the
        # last overlapping SNP; randint(n) samples 0..n-1 inclusive.
        r = np.random.randint(n_overlap_hets)
        return (het_snp_idx[r], het_read_offsets[r], is_split, overlap_indel)
    else:
        # We don't have haplotype tab, so we don't know which SNPs are
        # heterozygous in this individual. But we can still tell
        # whether read sequence matches reference or non-reference
        # allele. Choose ONE overlapping SNP randomly to add counts to
        if n_overlap_snps == 1:
            return (snp_idx[0], read_offsets[0], is_split, overlap_indel)
        else:
            # BUGFIX: same exclusive-upper-bound fix as above
            r = np.random.randint(n_overlap_snps)
            return (snp_idx[r], read_offsets[r], is_split, overlap_indel)
def add_read_count(read, chrom, ref_array, alt_array, other_array,
                   read_count_array, snp_index_array, snp_tab, hap_tab,
                   warned_pos, max_count, ind_idx):
    """Record one mapped read: increment the total read count at the read's
    start position and, if the read overlaps a usable SNP, increment the
    matching ref/alt/other allele-specific count at that SNP's position.

    Counts saturate at max_count; when the cap is hit a warning is printed
    once per position (tracked in warned_pos).
    """
    # pysam positions start at 0
    start = read.pos+1
    end = read.aend

    if start < 1 or end > chrom.length:
        sys.stderr.write("WARNING: skipping read aligned past end of "
                         "chromosome. read: %d-%d, %s:1-%d\n" %
                         (start, end, chrom.name, chrom.length))
        return

    if read.qlen != read.rlen:
        sys.stderr.write("WARNING skipping read: handling of "
                         "partially mapped reads not implemented\n")
        return

    # look for SNPs that overlap mapped read position, and if there
    # are more than one, choose one at random
    snp_idx, read_offset, is_split, overlap_indel = \
        choose_overlap_snp(read, snp_tab, snp_index_array, hap_tab, ind_idx)

    # reads overlapping known indels are excluded from all counts
    if overlap_indel:
        return

    # store counts of reads at start position
    if read_count_array[start-1] < max_count:
        read_count_array[start-1] += 1
    else:
        if start not in warned_pos:
            sys.stderr.write("WARNING read count at position %d "
                             "exceeds max %d\n" % (start, max_count))
            warned_pos[start] = True

    if snp_idx is None:
        # read does not overlap a usable SNP
        return

    snp = snp_tab[snp_idx]
    base = read.seq[read_offset]
    snp_pos = snp['pos']

    if base == snp['allele1']:
        # matches reference allele
        if ref_array[snp_pos-1] < max_count:
            ref_array[snp_pos-1] += 1
        elif snp_pos not in warned_pos:
            sys.stderr.write("WARNING ref allele count at position %d "
                             "exceeds max %d\n" % (snp_pos, max_count))
            warned_pos[snp_pos] = True
    elif base == snp['allele2']:
        # matches alternate allele
        if alt_array[snp_pos-1] < max_count:
            alt_array[snp_pos-1] += 1
        elif snp_pos not in warned_pos:
            sys.stderr.write("WARNING alt allele count at position %d "
                             "exceeds max %d\n" % (snp_pos, max_count))
            warned_pos[snp_pos] = True
    else:
        # matches neither allele
        if other_array[snp_pos-1] < max_count:
            other_array[snp_pos-1] += 1
        elif snp_pos not in warned_pos:
            sys.stderr.write("WARNING other allele count at position %d "
                             "exceeds max %d\n" % (snp_pos, max_count))
            # BUGFIX: mark this position as warned, exactly like the
            # ref/alt branches do, so the warning is not repeated for
            # every subsequent read at this position
            warned_pos[snp_pos] = True
def parse_args():
    """Build and parse this script's command-line arguments.

    Returns the argparse namespace.  Exits with a usage error (via
    parser.error, which raises SystemExit) when --hap_track is supplied
    without both --individual and --samples_file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--assembly", help="genome assembly that reads "
                        "were mapped to (e.g. hg19)", default=None)
    parser.add_argument("--snp_index_track",
                        help="name of SNP index track "
                        "(default=1000genomes/snp_index)",
                        default="1000genomes/snp_index")
    parser.add_argument("--snp_track",
                        help="name of track containing table of SNPs"
                        " (default=1000genomes/snp_tab)",
                        default="1000genomes/snp_tab")
    parser.add_argument("--data_type",
                        help="data type of stored counts; uint8 takes "
                        "up less disk space but has a maximum value of 255 "
                        "(default=uint8)", choices=("uint8", "uint16"),
                        default="uint8")
    parser.add_argument("--hap_track",
                        help="name of haplotype track; if supplied when "
                        "read overlaps multiple SNPs counts are randomly "
                        "assigned to ONE of the overlapping HETEROZYGOUS "
                        "SNPs; if not supplied counts are randomly assigned "
                        "to ONE of overlapping SNPs (regardless of genotype)",
                        default=None)
    parser.add_argument("--samples_file",
                        help="path to file containing a list of individual "
                        "identifiers in the same order that the individuals appear "
                        "in the haplotype table. The file is assumed to be in the "
                        "same format as the sample file used by IMPUTE2. This file "
                        "contains 4 columns: 'sample' 'population' 'group' 'sex'. "
                        "This must be provided if hap_track is specified. "
                        "The sex column is not currently used and can be omitted",
                        default=None)
    parser.add_argument("--individual",
                        help="identifier for individual, used to determine which "
                        "SNPs are heterozygous (e.g. 18505). Must be "
                        "provided if hap_track is specified and must match "
                        "one of the individuals in the provided samples_file",
                        default=None)
    parser.add_argument("--population",
                        help="indicate that haplotype table contains only "
                        "individuals from this population or group "
                        "(e.g. YRI or EUR)", default=None)
    parser.add_argument("ref_as_count_track",
                        help="name of track to store counts of reads "
                        "that match reference")
    parser.add_argument("alt_as_count_track",
                        help="name of track to store counts of reads "
                        "that match alternate")
    parser.add_argument("other_as_count_track",
                        help="name of track to store counts of reads "
                        "that match neither reference or alternate")
    parser.add_argument("read_count_track",
                        help="name of track to store read counts in--"
                        "positions of LEFT end of read are used")
    parser.add_argument("bam_filenames", action="store", nargs="+",
                        help="BAM file(s) to read mapped reads from. "
                        "BAMs must be sorted and indexed.")
    args = parser.parse_args()
    if args.hap_track and (args.individual is None or args.samples_file is None):
        # BUGFIX: the message previously misspelled the flag as "--indidivual"
        parser.error("--individual and --samples_file arguments "
                     "must be provided when --hap_track is specified")
    return args
def lookup_individual_index(samples_file, ind_name, population=None):
    """Return the column index of an individual in the haplotype table.

    samples_file is an IMPUTE2-style file with columns
    'sample' 'population' 'group' ['sex'].  Any "NA" prefix in the sample
    name is stripped before comparison.  If population is given, only rows
    whose population OR group matches it (case-insensitively) are counted
    toward the index.  Raises ValueError when the individual is absent.
    """
    p = population.lower() if population else None
    idx = 0
    # BUGFIX: use a context manager so the file is closed even on the
    # ValueError path (previously it was only closed on success)
    with open(samples_file) as f:
        for line in f:
            if line.startswith("samples"):
                # header line
                continue
            words = line.rstrip().split()
            name = words[0].replace("NA", "")
            pop = words[1].lower() if len(words) > 1 else ""
            group = words[2].lower() if len(words) > 2 else ""
            # if specified, only consider a single population or group
            if p and pop != p and group != p:
                continue
            if name == ind_name:
                return idx
            idx += 1
    raise ValueError("individual %s (with population=%s) "
                     "is not in samples file %s" %
                     (ind_name, population, samples_file))
def main():
    """Driver: create output count tracks, then for each autosome tally
    per-position read counts and ref/alt/other allele counts from the
    supplied BAM files, writing results into HDF5-backed genome tracks."""
    args = parse_args()
    # create a database track
    gdb = genome.db.GenomeDB(assembly=args.assembly)
    ref_count_track = gdb.create_track(args.ref_as_count_track)
    alt_count_track = gdb.create_track(args.alt_as_count_track)
    other_count_track = gdb.create_track(args.other_as_count_track)
    read_count_track = gdb.create_track(args.read_count_track)
    output_tracks = [ref_count_track, alt_count_track,
                     other_count_track, read_count_track]
    snp_track = gdb.open_track(args.snp_track)
    snp_index_track = gdb.open_track(args.snp_index_track)
    if args.hap_track:
        hap_track = gdb.open_track(args.hap_track)
        # map the individual's name to its column in the haplotype table
        ind_idx = lookup_individual_index(args.samples_file,
                                          args.individual, args.population)
    else:
        hap_track = None
        ind_idx = None
    chrom_dict = {}  # NOTE(review): populated below but never read afterwards
    count = 0
    # initialize every chromosome in output tracks
    for chrom in gdb.get_all_chromosomes():
        for track in output_tracks:
            create_carray(track, chrom, args.data_type)
        chrom_dict[chrom.name] = chrom
    count = 0  # NOTE(review): redundant re-initialization
    # cap depends on the storage width of the chosen integer dtype
    if args.data_type == "uint8":
        max_count = MAX_UINT8_COUNT
    elif args.data_type == "uint16":
        max_count = MAX_UINT16_COUNT
    else:
        raise NotImplementedError("unsupported datatype %s" % args.data_type)
    for chrom in gdb.get_chromosomes(get_x=False):
        sys.stderr.write("%s\n" % chrom.name)
        warned_pos = {}
        # initialize count arrays for this chromosome to 0
        ref_carray = get_carray(ref_count_track, chrom)
        alt_carray = get_carray(alt_count_track, chrom)
        other_carray = get_carray(other_count_track, chrom)
        read_count_carray = get_carray(read_count_track, chrom)
        # NOTE(review): in-memory arrays are always uint8 even when
        # --data_type uint16 was requested -- TODO confirm intent
        ref_array = np.zeros(chrom.length, np.uint8)
        alt_array = np.zeros(chrom.length, np.uint8)
        other_array = np.zeros(chrom.length, np.uint8)
        read_count_array = np.zeros(chrom.length, np.uint8)
        # fetch SNP info for this chromosome
        sys.stderr.write("fetching SNPs\n")
        snp_tab = snp_track.h5f.getNode("/%s" % chrom.name)
        snp_index_array = snp_index_track.get_nparray(chrom)
        if hap_track:
            hap_tab = hap_track.h5f.getNode("/%s" % chrom.name)
        else:
            hap_tab = None
        # loop over all BAM files, pulling out reads
        # for this chromosome
        for bam_filename in args.bam_filenames:
            sys.stderr.write("reading from file %s\n" % bam_filename)
            samfile = pysam.Samfile(bam_filename, "rb")
            for read in get_sam_iter(samfile, chrom):
                count += 1
                if count == 10000:
                    # progress dot every 10k reads
                    sys.stderr.write(".")
                    count = 0
                add_read_count(read, chrom, ref_array, alt_array,
                               other_array, read_count_array,
                               snp_index_array, snp_tab, hap_tab,
                               warned_pos, max_count, ind_idx)
        # store results for this chromosome
        ref_carray[:] = ref_array
        alt_carray[:] = alt_array
        other_carray[:] = other_array
        read_count_carray[:] = read_count_array
        sys.stderr.write("\n")
        # NOTE(review): only the most recently opened Samfile is closed
        # here; with multiple BAMs the earlier handles leak -- confirm
        samfile.close()
    # set track statistics and close HDF5 files
    sys.stderr.write("setting track statistics\n")
    for track in output_tracks:
        sys.stderr.write("%s\n" % track.name)
        trackstat.set_stats(gdb, track)
        track.close()
    snp_track.close()
    snp_index_track.close()
    if hap_track:
        hap_track.close()
main()
| gmcvicker/CHT | import_bam_ref_nonref_counts.py | Python | apache-2.0 | 23,722 | [
"pysam"
] | 6cecac51ef4c848ac2c1784a8b70a357af9d0f9ce18a362dcea48e9d54aa3353 |
import math
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class CTCTestBase(object):
    """Mixin providing fixtures and forward/backward checks for the
    connectionist temporal classification (CTC) loss.

    Subclasses supply ``dtype`` and ``reduce`` class attributes (via
    ``testing.parameterize``) and may adjust the fixture in ``setUp``.
    """

    def setUp(self):
        # x: input scores, shape (seq_len=4, batch=2, n_symbols=3)
        self.x = numpy.random.uniform(-1, 1, (4, 2, 3)).astype(self.dtype)
        # t: per-batch label sequences; l: labels interleaved with the
        # blank symbol (2), i.e. the padded CTC path
        self.t = numpy.array([[0, 1], [1, 0]]).astype(numpy.int32)
        self.l = numpy.array([[2, 0, 2, 1, 2],
                              [2, 1, 2, 0, 2]]).astype(numpy.int32)
        self.blank_symbol = 2
        self.x_length = numpy.full((len(self.x[0]),), len(self.x), dtype='i')
        self.l_length = numpy.full((len(self.t),), len(self.t[0]), dtype='i')
        self.use_length = True
        if self.reduce == 'mean':
            self.gy = numpy.random.uniform(-1, 1, ()).astype(self.dtype)
        else:
            self.gy = numpy.random.uniform(-1, 1, (2,)).astype(self.dtype)
        # float16 needs looser tolerances and float64 numerical gradients
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 1e-2}
            self.check_backward_options = {
                'atol': 1e-3, 'dtype': numpy.float64}
        else:
            self.check_forward_options = {}
            self.check_backward_options = {'atol': 1e-4}

    # recursive forward computation.
    def alpha(self, x, l, t, u):
        """CTC forward variable: probability of having emitted the first
        u+1 symbols of the padded label l within the first t+1 frames,
        where x holds per-frame symbol probabilities."""
        if u < 0:
            return 0.0
        if t == 0:
            if u == 0:
                return x[0][self.blank_symbol]
            elif u == 1:
                return x[0][l[1]]
            else:
                return 0.0
        elif l[u] == self.blank_symbol or l[u] == l[u - 2]:
            # blank or repeated label: cannot skip the preceding blank
            return (x[t][l[u]] *
                    (self.alpha(x, l, t - 1, u - 1) +
                     self.alpha(x, l, t - 1, u)))
        else:
            return (x[t][l[u]] *
                    (self.alpha(x, l, t - 1, u - 2) +
                     self.alpha(x, l, t - 1, u - 1) +
                     self.alpha(x, l, t - 1, u)))

    def check_forward(self, t_data, xs_data, l_length, x_length,
                      wrap_variable=True):
        """Compare the library CTC loss against the naive recursive
        reference computed with ``alpha``."""
        if wrap_variable:
            x = tuple(chainer.Variable(x_data) for x_data in xs_data)
            t = chainer.Variable(t_data)
        else:
            x = xs_data
            t = t_data
        args = (x, t, self.blank_symbol)
        if self.use_length:
            if wrap_variable:
                args += (chainer.Variable(x_length),
                         chainer.Variable(l_length))
            else:
                args += (x_length, l_length)
        loss = functions.connectionist_temporal_classification(
            *args, reduce=self.reduce).data
        # compute expected value by recursive computation.
        xp = backend.get_array_module(self.x)
        xt = xp.swapaxes(self.x, 0, 1)
        # softmax over symbols for each (batch, time) position
        for b in range(xt.shape[0]):
            for t in range(xt.shape[1]):
                xt[b][t] = numpy.exp(xt[b][t]) / numpy.sum(numpy.exp(xt[b][t]))
        batch_size = xt.shape[0]
        path_length = 2 * l_length + 1
        loss_expect = xp.zeros((batch_size,), dtype=self.dtype)
        for i in range(batch_size):
            xtb, lb, xlb, plb = xt[i], self.l[i], x_length[i], path_length[i]
            # CTC loss = -log(prob of ending in last blank or last label)
            loss_expect[i] = -math.log(
                self.alpha(xtb, lb, int(xlb - 1), int(plb - 1)) +
                self.alpha(xtb, lb, int(xlb - 1), int(plb - 2)))
        if self.reduce == 'mean':
            loss_expect = xp.mean(loss_expect)
        testing.assert_allclose(
            loss_expect, loss, **self.check_forward_options)

    def test_forward_cpu(self):
        self.check_forward(self.t, tuple(self.x),
                           self.l_length, self.x_length)

    def test_forward_without_wrap_cpu(self):
        self.check_forward(self.t, tuple(self.x),
                           self.l_length, self.x_length, wrap_variable=False)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.t),
                           tuple(cuda.to_gpu(x_data) for x_data in self.x),
                           cuda.to_gpu(self.l_length),
                           cuda.to_gpu(self.x_length))

    @attr.gpu
    def test_forward_without_wrap_gpu(self):
        self.check_forward(cuda.to_gpu(self.t),
                           tuple(cuda.to_gpu(x_data) for x_data in self.x),
                           cuda.to_gpu(self.l_length),
                           cuda.to_gpu(self.x_length),
                           wrap_variable=False)

    # expected value(via numerical differentiation) from t_data
    def check_backward(self, t_data, xs_data, l_length, x_length, gy_data):
        """Numerically verify gradients of the CTC loss."""
        def f(input_length, label_length, t, *x):
            return functions.connectionist_temporal_classification(
                x, t, self.blank_symbol, x_length, l_length,
                reduce=self.reduce)

        gradient_check.check_backward(
            f, (x_length, l_length, t_data) + xs_data, gy_data,
            eps=1e-2, **self.check_backward_options)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.t, tuple(self.x),
                            self.l_length, self.x_length,
                            self.gy)

    @condition.retry(3)
    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.t),
                            tuple(cuda.to_gpu(x_data) for x_data in self.x),
                            cuda.to_gpu(self.l_length),
                            cuda.to_gpu(self.x_length),
                            cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'reduce': ['mean', 'no'],
}))
class TestCTC(unittest.TestCase, CTCTestBase):
    """CTC tests with the default (unmodified) base fixture."""

    def setUp(self):
        CTCTestBase.setUp(self)
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'reduce': ['mean', 'no'],
}))
class TestCTCWithoutLength(unittest.TestCase, CTCTestBase):
    """CTC tests that omit the explicit input/label length arguments."""

    def setUp(self):
        CTCTestBase.setUp(self)
        self.use_length = False
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'reduce': ['mean', 'no'],
}))
class TestCTCWithLabelPadding(unittest.TestCase, CTCTestBase):
    """CTC tests where the first batch element's label is shorter
    (exercises label padding)."""

    def setUp(self):
        CTCTestBase.setUp(self)
        self.l_length[0] = 1
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'reduce': ['mean', 'no'],
}))
class TestCTCWithInputPadding(unittest.TestCase, CTCTestBase):
    """CTC tests where the first batch element's input is shorter
    (exercises input padding)."""

    def setUp(self):
        CTCTestBase.setUp(self)
        self.x_length[0] = 3
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'reduce': ['mean', 'no'],
}))
class TestCTCWithAllPadding(unittest.TestCase, CTCTestBase):
    """CTC tests where every batch element has both input and label
    shortened (padding on all sequences)."""

    def setUp(self):
        CTCTestBase.setUp(self)
        self.x_length[...] = 3
        self.l_length[...] = 1
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'reduce': ['mean', 'no'],
}))
class TestCTCWithRepeatedLabel(unittest.TestCase, CTCTestBase):
    """CTC tests with a repeated symbol in the label sequence, which
    forces a blank between the repeats in the padded path."""

    def setUp(self):
        CTCTestBase.setUp(self)
        self.t = numpy.array([[0, 1, 1], [0, 1, 0]]).astype(numpy.int32)
        self.l = numpy.array([[2, 0, 2, 1, 2, 1, 2],
                              [2, 0, 2, 1, 2, 0, 2]]).astype(numpy.int32)
        self.l_length = numpy.full((len(self.t),), len(self.t[0]), dtype='i')
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'reduce': ['mean', 'no'],
}))
class TestCTCBlankSymbol(unittest.TestCase, CTCTestBase):
    """CTC tests using a non-default blank symbol (3) with a 4-symbol
    alphabet."""

    def setUp(self):
        CTCTestBase.setUp(self)
        self.x = numpy.random.uniform(-1, 1, (4, 2, 4)).astype(self.dtype)
        self.l = numpy.array([[3, 0, 3, 1, 3],
                              [3, 1, 3, 0, 3]]).astype(numpy.int32)
        self.blank_symbol = 3
class TestCTCUseNoBackpropMode(unittest.TestCase):
    """The CTC loss must be evaluable inside ``no_backprop_mode``."""

    def test_no_backprop_mode(self):
        xs_data = numpy.random.uniform(-1, 1, (4, 2, 3)).astype(numpy.float32)
        t_data = numpy.array([[0, 1], [1, 0]]).astype(numpy.int32)
        with chainer.no_backprop_mode():
            x = [chainer.Variable(x_data) for x_data in xs_data]
            t = chainer.Variable(t_data)
            functions.connectionist_temporal_classification(x, t, 2)
class TestCTCError(unittest.TestCase):
    """Passing a bare Variable (not a sequence) as x must raise TypeError."""

    def test_not_iterable(self):
        x = chainer.Variable(numpy.zeros((4, 2, 3), numpy.float32))
        t = chainer.Variable(numpy.zeros((2, 2), numpy.int32))
        with self.assertRaises(TypeError):
            functions.connectionist_temporal_classification(x, t, 0)
class TestCTCInvalidReductionOption(unittest.TestCase):
    """An unknown ``reduce`` option must raise ValueError."""

    def test_not_iterable(self):
        x = chainer.Variable(numpy.zeros((4, 2, 3), numpy.float32))
        t = chainer.Variable(numpy.zeros((2, 2), numpy.int32))
        with self.assertRaises(ValueError):
            functions.connectionist_temporal_classification(
                tuple(x), t, 0, reduce='invalid_option')
testing.run_module(__name__, __file__)
| okuta/chainer | tests/chainer_tests/functions_tests/loss_tests/test_ctc.py | Python | mit | 9,219 | [
"xTB"
] | c437c61952e952a0768b371660d92ecd05414f529aa9483b9a488ac73c25cec0 |
#!/usr/bin/python3
# direct translation of extract_wb in python using as little external deps as possible
from __future__ import print_function
import sys
import os
import xml.etree.ElementTree as ET
import subprocess
import shlex
def eprint(*args, **kwargs):
    # print() wrapper that writes to stderr; accepts the same kwargs as print
    print(*args, file=sys.stderr, **kwargs)
# Require at least one input file (an image for exiftool, or a .txt dump).
if len(sys.argv) < 2 :
    sys.exit("Usage: extract_wb <file1> [file2] ...")
IGNORED_PRESETS = {"Auto", "Kelvin", "Measured", "AsShot", "As Shot", "Preset",
"Natural Auto", "Multi Auto", "Color Temperature Enhancement",
"One Touch WB 1", "One Touch WB 2", "One Touch WB 3",
"One Touch WB 4", "Custom WB 1", "Auto0", "Auto1", "Auto2",
"Custom", "CWB1", "CWB2", "CWB3", "CWB4", "Black",
"Illuminator1", "Illuminator2", "Uncorrected"}
FL_PRESET_REPLACE = {
"Fluorescent" : "CoolWhiteFluorescent",
"FluorescentP1" : "DayWhiteFluorescent",
"FluorescentP2" : "DaylightFluorescent",
"FluorescentM1" : "WarmWhiteFluorescent",
"FluorescentD" : "DaylightFluorescent",
"FluorescentN" : "NeutralFluorescent",
"FluorescentW" : "WhiteFluorescent",
"Daylight Fluorescent" : "DaylightFluorescent",
"Day White Fluorescent" : "DayWhiteFluorescent",
"White Fluorescent" : "WhiteFluorescent",
"Unknown (0x600)" : "Underwater",
"Sunny" : "DirectSunlight",
"Fine Weather" : "DirectSunlight",
"Tungsten (Incandescent)" : "Tungsten",
"ISO Studio Tungsten" : "Tungsten",
"Cool WHT FL" : "CoolWhiteFluorescent",
"Daylight FL" : "DaylightFluorescent",
"Warm WHT FL" : "WarmWhiteFluorescent",
"Warm White Fluorescent" : "WarmWhiteFluorescent",
"White FL" : "WhiteFluorescent",
"Mercury Lamp" : "HighTempMercuryVaporFluorescent",
"Day White FL" : "DayWhiteFluorescent",
"Sodium Lamp" : "SodiumVaporFluorescent",
"3000K (Tungsten light)" : "Tungsten",
"4000K (Cool white fluorescent)" : "CoolWhiteFluorescent",
"5300K (Fine Weather)" : "Daylight",
"5500K (Flash)" : "Flash",
"6000K (Cloudy)" : "Cloudy",
"7500K (Fine Weather with Shade)" : "Shade",
}
PRESET_ORDER = ["DirectSunlight", "Daylight", "D55", "Shade","Cloudy",
"Tungsten", "Incandescent","Fluorescent",
"WarmWhiteFluorescent", "CoolWhiteFluorescent",
"DayWhiteFluorescent","DaylightFluorescent",
"DaylightFluorescent", "NeutralFluorescent", "WhiteFluorescent",
"HighTempMercuryVaporFluorescent", "HTMercury",
"SodiumVaporFluorescent", "Underwater", "Flash", "Unknown"]
PRESET_SORT_MAPPING = {}
for index,name in enumerate(PRESET_ORDER):
PRESET_SORT_MAPPING[name] = index + 1
cams_from_source = os.path.dirname(os.path.abspath(__file__)) + "/../src/external/rawspeed/data/cameras.xml"
cams_from_dist = os.path.dirname(os.path.abspath(__file__)) + "/../rawspeed/cameras.xml"
CAMERAS = os.path.abspath(cams_from_source) if os.path.exists(os.path.abspath(cams_from_source)) else os.path.abspath(cams_from_dist)
if not os.path.exists(CAMERAS):
sys.exit("Can't find cameras mapping file, should be in {0}".format(CAMERAS))
exif_name_map = {}
xml_doc = ET.parse(CAMERAS)
for camera in xml_doc.getroot().findall('Camera'):
maker = exif_maker = camera.get('make')
model = exif_model = camera.get('model')
exif_id = maker,model
if camera.find('ID') is not None:
cid = camera.find('ID')
maker = cid.get('make')
model = cid.get('model')
exif_name_map[exif_id] = maker,model
for alias in camera.findall('Aliases/Alias'):
exif_model = alias.text
exif_id = exif_maker, exif_model
exif_name_map[exif_id] = maker,model
found_presets = []
for filename in sys.argv[1:]:
red = green = blue = maker = model = preset = None
finetune = fl_count = rlevel = blevel = glevel = 0
listed_presets = []
preset_names = {}
gm_skew = False
command = "exiftool -Make -Model \"-WBType*\" \"-WB_*\" \"-ColorTemp*\" "\
"-WhiteBalance -WhiteBalance2 -WhitePoint -ColorCompensationFilter "\
"-WBShiftAB -WBShiftAB_GM -WBShiftAB_GM_Precise -WBShiftGM -WBScale "\
"-WhiteBalanceFineTune -WhiteBalanceComp -WhiteBalanceSetting "\
"-WhiteBalanceBracket -WhiteBalanceBias -WBMode -WhiteBalanceMode "\
"-WhiteBalanceTemperature -WhiteBalanceDetected -ColorTemperature "\
"-WBShiftIntelligentAuto -WBShiftCreativeControl -WhiteBalanceSetup "\
"-WBRedLevel -WBBlueLevel -WBGreenLevel -RedBalance -BlueBalance "\
"\"{0}\"".format(filename)
if filename.endswith(('.txt','.TXT')):
command = 'cat "{0}"'.format(filename)
command = shlex.split(command)
proc = subprocess.check_output(command, universal_newlines=True)
for io in proc.splitlines():
lineparts = io.split(':')
tag = lineparts[0].strip()
values = lineparts[1].strip().split(' ')
if 'Make' in tag.split():
maker = lineparts[1].strip()
elif 'Model' in tag.split():
model = lineparts[1].strip()
elif tag == "WB RGGB Levels":
green = (float(values[1])+float(values[2]))/2.0
red = float(values[0])/green
blue = float(values[3])/green
green = 1
elif tag == "WB RB Levels":
red = float(values[0])
blue = float(values[1])
if len(values) == 4 and values[2] == "256" and values[3] == "256":
red /= 256.0
blue /= 256.0
green = 1
elif tag == "WB GRB Levels":
green = float(values[0])
red = float(values[1])/green
blue = float(values[2])/green
green = 1
# elif tag == "WB GRB Levels Auto" and maker == "FUJIFILM" # fuji seems to use "WB GRB Levels Auto to describe manual finetuning
# green = float(values[0])
# red = float(values[1])/green
# blue = float(values[2])/green
# green = 1
elif tag == "White Point" and len(values) > 3:
green = (float(values[1])+float(values[2]))/2.0
red = float(values[0])/green
blue = float(values[3])/green
green = 1
elif tag == "White Balance" or tag == "White Balance 2":
preset = ' '.join(values)
if preset in FL_PRESET_REPLACE:
preset = FL_PRESET_REPLACE[preset]
elif ' '.join(tag.split()[:2]) == "WB Type":
preset_names[' '.join(tag.split()[2:])] = ' '.join(values)
elif ' '.join(tag.split()[:3]) in ['WB RGB Levels', 'WB RGGB Levels', 'WB RB Levels']:
# todo - this codepath is weird
p = ''.join(tag.split()[3:])
if( p in preset_names):
p = preset_names[p]
r=g=b=0
if len(values) == 4 and ' '.join(tag.split()[:3]) in ['WB RB Levels']:
g = (float(values[2])+float(values[3]))/2.0
r = float(values[0])/g
b = float(values[1])/g
g = 1
elif len(values) == 4:
g = (float(values[1])+float(values[2]))/2.0
r = float(values[0])/g
b = float(values[3])/g
g = 1
elif len(values) == 3:
g = float(values[1])
r = float(values[0])/g
b = float(values[2])/g
g = 1
elif len(values) == 2 and ' '.join(tag.split()[:3]) in ['WB RB Levels']:
r = float(values[0])
b = float(values[2])
g = 1
else:
eprint("Found RGB tag '{0}' with {1} values instead of 2, 3 or 4".format(p, len(values)))
if 'Fluorescent' in p:
fl_count += 1
if not p:
p= 'Unknown'
if p not in IGNORED_PRESETS:
listed_presets.append(tuple([p,r,g,b]))
elif tag == "WB Red Level":
rlevel = float(values[0])
elif tag == "WB Blue Level":
blevel = float(values[0])
elif tag == "WB Green Level":
glevel = float(values[0])
elif tag == "WB Shift AB": # canon - positive is towards amber, panasonic/leica/pentax - positive is towards blue?
finetune = values[0]
elif tag == "WB Shift GM": # detect GM shift and warn about it
gm_skew = gm_skew or (int(values[0]) != 0)
elif tag == "WB Shift AB GM": # Sony
finetune = values[0]
gm_skew = gm_skew or (int(values[1]) != 0)
elif tag == "WB Shift AB GM Precise" and maker.startswith("SONY"): # Sony
finetune = int(float(values[0]) * 2.0)
gm_skew = gm_skew or (float(values[1]) != 0.0)
elif tag == "White Balance Fine Tune" and maker.startswith("NIKON"): # nikon
finetune = 0-(int(values[0]) * 2) # nikon lies about half-steps (eg 6->6->5 instead of 6->5.5->5, need to address this later on, so rescalling this now)
gm_skew = gm_skew or (int(values[1]) != 0)
elif tag == "White Balance Fine Tune" and maker == "FUJIFILM" and int(values[3]) != 0: # fuji
eprint("Warning: Fuji does not seem to produce any sensible data for finetuning! If all finetuned values are identical, use one with no finetuning (0)")
finetune = int(values[3]) / 20 # Fuji has -180..180 but steps are every 20
gm_skew = gm_skew or (int(values[1].replace(',','')) != 0)
elif tag == "White Balance Fine Tune" and maker == "SONY" and preset == "CoolWhiteFluorescent":
# Sony's Fluorescent Fun
if values[0] == "-1":
preset = "WarmWhiteFluorescent"
elif values[0] == "0":
preset = "CoolWhiteFluorescent"
elif values[0] == "1":
preset = "DayWhiteFluorescent"
elif values[0] == "2":
preset = "DaylightFluorescent"
else:
eprint("Warning: Unknown Sony Fluorescent WB Preset!")
elif tag == "White Balance Bracket": # olympus
finetune = values[0]
gm_skew = gm_skew or (int(values[1]) != 0)
elif tag == "Color Compensation Filter": # minolta?
gm_skew = gm_skew or (int(values[0]) != 0)
if rlevel > 0 and glevel > 0 and blevel > 0:
red = rlevel/glevel
blue = blevel/glevel
green = 1
if gm_skew:
eprint('WARNING: {0} has finetuning over GM axis! Data is skewed!'.format(filename))
# Adjust the maker/model we found with the map we generated before
if exif_name_map[maker,model]:
enm = exif_name_map[maker,model]
maker = enm[0]
model = enm[1]
else:
eprint("WARNING: Couldn't find model in cameras.xml ('{0}', '{1}')".format(maker, model))
for preset_arr in listed_presets:
# ugly hack. Canon's Fluorescent is listed as WhiteFluorescent in usermanual
preset_arrv = list(preset_arr)
if maker and maker == "Canon" and preset_arrv[0] == "Fluorescent":
preset_arrv[0] = "WhiteFluorescent"
if preset_arrv[0] in FL_PRESET_REPLACE:
preset_arrv[0] = FL_PRESET_REPLACE[preset_arrv[0]]
if preset_arrv[0] not in IGNORED_PRESETS:
found_presets.append(tuple([maker,model,preset_arrv[0], 0, preset_arrv[1], preset_arrv[2], preset_arrv[3]]))
# Print out the WB value that was used in the file
if not preset:
preset = filename
if red and green and blue and preset not in IGNORED_PRESETS:
found_presets.append(tuple([maker, model, preset, int(finetune), red, green, blue]))
# get rid of duplicate presets
found_presets = list(set(found_presets))
def preset_to_sort(preset):
    """Build the sort key for a preset tuple.

    Input/output shape: (maker, model, preset_name/rank, finetune, r, g, b).
    The preset name is replaced by a numeric rank: 0 for ignored presets,
    the position from PRESET_SORT_MAPPING for known names, or the Kelvin
    value for "NNNNK"-style names.  Unknown names rank 0 with a warning.
    """
    name = preset[2]
    rank = 0
    if name in IGNORED_PRESETS:
        rank = 0
    elif name in PRESET_SORT_MAPPING:
        rank = PRESET_SORT_MAPPING[name]
    elif name.endswith('K'):
        rank = int(name[:-1])
    else:
        eprint("WARNING: no defined sort order for '{0}'".format(name))
    return (preset[0], preset[1], rank, preset[3], preset[4], preset[5], preset[6])
found_presets.sort(key=preset_to_sort)
min_padding = 0
for preset in found_presets:
if len(preset[2]) > min_padding:
min_padding = len(preset[2])
#dealing with Nikon half-steps
for index in range(len(found_presets)-1):
if (found_presets[index][0] == 'Nikon' and #case now translated
found_presets[index+1][0] == found_presets[index][0] and
found_presets[index+1][1] == found_presets[index][1] and
found_presets[index+1][2] == found_presets[index][2] and
found_presets[index+1][3] == found_presets[index][3]) :
curr_finetune = int(found_presets[index][3])
if curr_finetune < 0:
found_presets[index+1] = list(found_presets[index+1])
found_presets[index+1][3] = (int(found_presets[index+1][3]) + 1)
found_presets[index+1] = tuple(found_presets[index+1])
elif curr_finetune > 0:
found_presets[index] = list(found_presets[index])
found_presets[index][3] = (curr_finetune) - 1
found_presets[index] = tuple(found_presets[index])
# check for gaps in finetuning for half-steps (seems that nikon and sony can have half-steps)
for index in range(len(found_presets)-1):
if ( (found_presets[index][0] == "Nikon" or found_presets[index][0] == "Sony") and #case now translated
found_presets[index+1][0] == found_presets[index][0] and ##
found_presets[index+1][1] == found_presets[index][1] and
found_presets[index+1][2] == found_presets[index][2]) :
found_presets[index] = list(found_presets[index])
found_presets[index+1] = list(found_presets[index+1])
if (found_presets[index+1][3] % 2 == 0 and
found_presets[index][3] % 2 == 0 and
found_presets[index+1][3] == found_presets[index][3] + 2):
#detected gap eg -12 -> -10. slicing in half to undo multiplication done earlier
found_presets[index][3] = int(found_presets[index][3] / 2)
found_presets[index+1][3] = int(found_presets[index+1][3] / 2)
elif (found_presets[index+1][3] % 2 == 0 and
found_presets[index][3] % 2 == 1 and
found_presets[index+1][3] == (found_presets[index][3] + 1)*2 and
(index + 2 == len(found_presets) or
found_presets[index+2][2] != found_presets[index+1][2] ) ):
#dealing with corner case of last-halfstep not being dealth with earlier
found_presets[index+1][3] = int(found_presets[index+1][3] / 2)
found_presets[index] = tuple(found_presets[index])
found_presets[index+1] = tuple(found_presets[index+1])
#detect lazy finetuning (will not complain if there's no finetuning)
lazy_finetuning = []
for index in range(len(found_presets)-1):
if (found_presets[index+1][0] == found_presets[index][0] and ##
found_presets[index+1][1] == found_presets[index][1] and
found_presets[index+1][2] == found_presets[index][2] and
found_presets[index+1][3] != ((found_presets[index][3])+1) ):
# found gap. complain about needing to interpolate
lazy_finetuning.append(tuple([found_presets[index][0], found_presets[index][1], found_presets[index][2]]))
# Get rid of duplicate lazy finetuning reports
lazy_finetuning = list(set(lazy_finetuning))
# $stderr.puts lazy_finetuning.inspect.gsub("], ", "],\n") # debug content
for lazy in lazy_finetuning:
eprint("Gaps detected in finetuning for {0} {1} preset {2}, dt will need to interpolate!".format(lazy[0], lazy[1], lazy[2]))
for preset in found_presets:
if preset[2] in IGNORED_PRESETS:
eprint("Ignoring preset '{0}'".format(preset[2]))
else:
preset_name = ''
if preset[2].endswith('K'):
preset_name = '"'+preset[2]+'"'
else:
preset_name = preset[2]
print(' {{ "{0}", "{1}", {2:<{min_pad}}, {3}, {{ {4}, {5}, {6}, 0 }} }},'.format(preset[0], preset[1], preset_name, preset[3], preset[4], preset[5], preset[6], min_pad=min_padding)) | LebedevRI/darktable | tools/extract_wb.py | Python | gpl-3.0 | 16,320 | [
"Amber"
] | 9b32a0618c5a2e7fa9f5f16e2f7481a37df8b87872e0b6d28e613d9e108d7646 |
#!/usr/bin/python
########################################################################
# 1 August 2014
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import os, sys, re
import subprocess
import argparse
from collections import defaultdict
import math
def read_gapdh_counts_file(ifile):
    """Return the second tab-separated field of the file's first line
    (the GAPDH count used for normalisation)."""
    with open(ifile) as handle:
        first_line = next(handle).rstrip()
    return first_line.split("\t")[1]
def create_consensus_gtf_from_bed(peak1, peak2, outprefix):
    """Merge two BED peak files into a consensus peak set and write it as
    "<outprefix>.bed" plus a matching "<outprefix>.gtf" with gene_id
    "peak_<n>" entries.

    Returns {peak_number: (chrom_without_chr_prefix, start, end)}.
    NOTE(review): requires bedtools (sortBed/mergeBed) on PATH, and builds
    shell commands with str.format + shell=True -- file names containing
    spaces or shell metacharacters would break or be unsafe.
    """
    peak_data = {}
    command1 = "cat {} {} > {}_tmp1.bed".format(peak1, peak2, outprefix)
    command2 = "sortBed -i {}_tmp1.bed > {}_tmp2.bed".format(outprefix, outprefix)
    command3 = "mergeBed -i {}_tmp2.bed > {}.bed".format(outprefix, outprefix)
    subprocess.call(command1, shell=True)
    subprocess.call(command2, shell=True)
    subprocess.call(command3, shell=True)
    # clean up the intermediate files
    subprocess.call("rm {0}_tmp1.bed {0}_tmp2.bed".format(outprefix), shell=True)
    c = 0
    output = open(outprefix+".gtf", "w")
    with open(outprefix+".bed") as f:
        for line in f:
            line = line.rstrip()
            word = line.split("\t")
            # drop the "chr" prefix for the GTF (Ensembl-style names)
            chromo = re.sub("chr", "", word[0])
            output.write("{}\tgene\texon\t{}\t{}\t.\t.\t.\tgene_id \"peak_{}\"; transcript_id \"peak_{}\";\n".format(chromo, word[1], word[2], c, c)),
            peak_data[c] = (chromo, word[1], word[2])
            c += 1
    output.close()
    return peak_data
def read_count_files(count):
    """Load an htseq-count style output file into {peak_number: count_str},
    stripping the "peak_" prefix that was added when the GTF was built."""
    counts = {}
    with open(count) as handle:
        for row in handle:
            fields = row.rstrip().split("\t")
            # remove the name prefix added in the GTF-generation step
            peak_id = re.sub("peak_", "", fields[0])
            counts[peak_id] = fields[1]
    return counts
def count(inv_conds, peaks, cond1, cond2, gapdh_counts):
    """Compare peak read counts between two conditions and write peaks
    with |log2 fold change| > 1 to "<cond1>_vs_<cond2>.txt".

    inv_conds maps condition name -> list of BAM paths (only the first is
    used); peaks maps BAM path -> BED peak file; gapdh_counts (optional)
    maps BAM path -> GAPDH count file used for normalisation.
    NOTE(review): the htseq-count invocations are commented out, so the
    .count files are assumed to already exist in the working directory.
    """
    sample1 = inv_conds[cond1][0]
    sample2 = inv_conds[cond2][0]
    name = "{}_vs_{}".format(cond1, cond2)
    #Combine, sort and merge peaks
    peak_data = create_consensus_gtf_from_bed(peaks[sample1], peaks[sample2], name)
    name1 = os.path.basename(sample1)
    name2 = os.path.basename(sample2)
    # NOTE(review): "." in the pattern is a regex wildcard, so any char
    # followed by "bam" is replaced -- fine for conventional names only
    count1 = re.sub(".bam", ".count", name1)
    count2 = re.sub(".bam", ".count", name2)
    #command1 = "htseq-count -f bam -s no {} {} > {}".format(sample1, name+".gtf", count1)
    #command2 = "htseq-count -f bam -s no {} {} > {}".format(sample2, name+".gtf", count2)
    #subprocess.call(command1, shell=True)
    #subprocess.call(command2, shell=True)
    peak_count1 = read_count_files(count1)
    peak_count2 = read_count_files(count2)
    #GAPDH count normalisation
    if gapdh_counts:
        c1 = read_gapdh_counts_file(gapdh_counts[sample1])
        c2 = read_gapdh_counts_file(gapdh_counts[sample2])
        # scale factors: GAPDH counts per thousand
        c1 = float(c1)/1000
        c2 = float(c2)/1000
        #Add 1 to values to prevent errors from 0 division
        for key in peak_count1:
            new_value = int(peak_count1[key])*float(c1)
            peak_count1[key] = new_value +1
        for key in peak_count2:
            new_value = int(peak_count2[key])*float(c2)
            peak_count2[key] = new_value+1
    result = {}  # NOTE(review): unused
    output = open(name+".txt", "w")
    output.write("Chromosome\tStart\tEnd\tPeak ID\t{}\t{}\tLFC\n".format(cond1, cond2)),
    #Log fold change calculation
    for key in peak_count1:
        if key.startswith("_"):
            # presumably htseq-count summary rows (e.g. __no_feature);
            # skipped -- TODO confirm
            pass
        elif float(peak_count1[key]) > 200 or float(peak_count2[key]) > 200:
            fc = float(peak_count1[key])/float(peak_count2[key])
            lfc = math.log(fc, 2)
            # report only peaks with at least a 2-fold change
            if lfc > 1 or lfc < -1:
                output.write("chr{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(peak_data[int(key)][0], peak_data[int(key)][1],peak_data[int(key)][2],key, peak_count1[key], peak_count2[key], lfc)),
    output.close()
"HTSeq"
] | cdaaf6af10c8d9fcc45c0833db7a5bc09262d995c50d7ac2cc200ff9cd6883da |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Vladimír Slávik 2008 - 2011
# Python 2.6, 3.1
#
# for Simutrans
# http://www.simutrans.com
#
# code is public domain
#
# read all dat files in all subfolders and rehash them together with images
# into translator-acceptable amount of data
from __future__ import print_function
import re, os, glob, copy
import simutools
#-----
Data = []
paksize = 128
outdir = "dump"
#-----
def procObj(obj) :
    # Process one parsed dat object: identify its type and name, score every
    # image-bearing line, keep only the 4 best-rated images, and repack those
    # tiles into a single vertical strip image referenced by rewritten lines.
    # Side effects: sets obj.save, obj.image, obj.newfile and edits obj.lines.
    objtype = ""
    objname = ""
    ratedimages = []  # list of (line, score) tuples
    obj.save = False
    obj.image = None
    obj.newfile = ""
    for line in obj.lines :
        l = line.lower()
        score = 0
        if l.startswith("obj") :
            objtype = line.split("=")[1].strip()
        elif l.startswith("name") :
            objname = line.split("=")[1].strip()
        elif l.startswith("backimage") :
            score = 100
        elif l.startswith("icon") :
            score = 200
        elif l.startswith("cursor") :
            score = 150
        elif l.startswith("emptyimage") :
            score = 100
        elif l.startswith("freightimage") and not l.startswith("freightimagetype") :
            score = 150
        elif l.startswith("frontimage") :
            score = 50
        elif l.startswith("frontdiagonal") :
            score = 50
        elif l.startswith("frontimageup") :
            score = 50
        elif l.startswith("backdiagonal") :
            score = 50
        elif l.startswith("backimageup") :
            score = 50
        elif l.startswith("backpillar") :
            score = 50
        elif l.startswith("backstart") :
            score = 100
        elif l.startswith("frontstart") :
            score = 150
        elif l.startswith("backramp") :
            score = 50
        elif l.startswith("frontramp") :
            score = 50
        elif l.startswith("image") :
            score = 50
        elif l.startswith("diagonal") :
            score = 50
        elif l.startswith("imageup") :
            score = 50
        elif l.startswith("openimage") :
            score = 50
        elif l.startswith("front_openimage") :
            score = 100
        elif l.startswith("closedimage") :
            score = 50
        elif l.startswith("front_closedimage") :
            score = 100
        # demote entries whose image reference is "-" (no actual image)
        if (l.find("=-") != -1) or ("-" in l[-3:-1]) :
            score = -1000
        # if it is a valid image, it is rated higher than 0
        if score > 0 :
            if l.find("[s]") != -1 :
                score = score + 100
            elif l.find("[ns]") != -1 :
                score = score + 200
            elif l.find("[0][0][0][0][0][0]") != -1 :
                score = score + 200
            elif l.find("[0][0][0][0][0][1]") != -1 :
                score = score + 200
            elif l.find("[0][0][0][0][0]") != -1 :
                score = score + 200
            ratedimages.append((line, score)) # -> list of tuples
    if objtype != "" :
        obj.save = True
    else :
        # no "obj=" line found -> nothing to export for this object
        return
    print(objtype, ":", objname)
    if len(ratedimages) > 0 :
        ratedimages.sort(key = lambda a : -a[1]) # order by score
        for di in ratedimages[4:] : # delete redundant - 5th image and on towards end
            i = obj.lines.index(di[0]) # find the same as line saved in score table
            del obj.lines[i]
        keptimages = []
        keptimages = ratedimages[:4] # remember what are the images that stay; at most 4
        imageparts = []
        obj.newfile = objtype + "_" + objname.replace(" ", "_")
        obj.image = pygame.Surface((paksize, paksize*4)) # 4 tiles vertically
        i = 0
        for item in keptimages :
            s = item[0].strip() # remove trailing \n !!!
            target = s.split("=")
            caller = target[0] + "="
            target = target[1]
            prefix = ""
            if target.startswith("> ") :
                prefix = "> "
                target = target[2:] # split
            if target.endswith(".png") :
                # already full path -> reparse it into normalized
                target = target.replace(".png", "") + ".0.0"
            if target.endswith(".PNG") :
                # same with uppercase
                target = target.replace(".PNG", "") + ".0.0"
            offsets = ""
            pdot = target.rfind(".")
            pcomma = target.rfind(",")
            if pcomma > pdot : # never gives false positive for correct entry since position is always set here
                # offsets are present
                pcomma = target.rfind(",", 1, pcomma) # find the previous comma - assumes both offsets are set
                offsets = target[pcomma:]
                target = target[:pcomma]
                pdot = target.rfind(".", 1, pdot) # find second last dot
            pos = target[pdot:]
            target = target[:pdot] # now finally file name without png is in target
            pos = pos.split(".") # offsets are [1] and [2], [0] is empty
            # ORIGINAL = caller + prefix + target + ".".join(pos) + offsets
            imagename = os.path.join(os.path.dirname(obj.srcfile), target + ".png")
            srcimage = pygame.image.load(imagename)
            coords = pygame.Rect(int(pos[2]) * paksize, int(pos[1]) * paksize, paksize, paksize) # grrr, swapped X and Y will haunt me until the end of days!
            obj.image.blit(srcimage, (0, paksize * i), coords) # copy image tile
            origindex = obj.lines.index(item[0]) # find where was original
            obj.lines[origindex] = caller + prefix + obj.newfile + "." + str(i) + ".0" + offsets + "\n"
            i = i + 1
#-----
def saveItem(obj) :
    # Persist one processed object to the dump directory: its rewritten
    # dat lines always, and the combined tile image when one was built.
    if not obj.save :
        return
    dat_path = os.path.join(outdir, obj.newfile + ".dat")
    with open(dat_path, 'w') as dat_file :
        dat_file.writelines(obj.lines)
    if obj.image is not None :
        png_path = os.path.join(outdir, obj.newfile + ".png")
        pygame.image.save(obj.image, png_path)
#-----
# main() is this piece of code
try :
    # pygame is required for loading/blitting the tile images; probe first
    import pygame
except ImportError :
    print("This script needs PyGame to work!")
    print("Visit http://www.pygame.org to get it.")
else :
    # collect every dat file below the current directory into Data
    simutools.walkFiles(os.getcwd(), simutools.loadFile, cbparam=Data)
    for item in Data :
        procObj(item)
    if not os.path.exists(outdir) :
        os.mkdir(outdir)
    for item in Data :
        saveItem(item)
#-----
# EOF | simutrans/pak128 | tools/extract-objects-translator-reduced.py | Python | artistic-2.0 | 5,276 | [
"VisIt"
] | 2ee38a9873b507c00d3c3d3708da3da8075f02524822ed7651108afe887a47c6 |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for a galaxy merger model evaluation."""
import glob
import os
from astropy import cosmology
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import tensorflow.compat.v2 as tf
def restore_checkpoint(checkpoint_dir, experiment):
  """Restores the latest checkpoint in `checkpoint_dir` into `experiment`.

  Args:
    checkpoint_dir: Directory containing TensorFlow checkpoints.
    experiment: Object exposing `checkpoint_items`, a dict of checkpointable
      objects restored by name.
  """
  checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
  # The step counter was checkpointed under the `_global_step_` key.
  global_step = tf.Variable(
      0, dtype=tf.int32, trainable=False, name='global_step')
  checkpoint = tf.train.Checkpoint(
      _global_step_=global_step, **experiment.checkpoint_items)
  checkpoint.restore(checkpoint_path)
def sum_average_transformed_mu_and_sigma(mu, log_sigma_sq):
  """Computes <mu>, var(mu) + <var> in transformed representation.

  This corresponds to assuming that the output distribution is a sum of
  Gaussian and computing the mean and variance of the resulting (non-Gaussian)
  distribution.

  Args:
    mu: Tensor of shape [B, ...] representing the means of the input
      distributions.
    log_sigma_sq: Tensor of shape [B, ...] representing log(sigma**2) of the
      input distributions. Can be None, in which case the variance is assumed
      to be zero.

  Returns:
    mu: Tensor of shape [...] representing the means of the output
      distributions.
    log_sigma_sq: Tensor of shape [...] representing log(sigma**2) of the
      output distributions.
  """
  av_mu = tf.reduce_mean(mu, axis=0)
  var_mu = tf.math.reduce_std(mu, axis=0)**2
  if log_sigma_sq is None:
    return av_mu, tf.math.log(var_mu)
  # Factor out the per-element max before exponentiating so the exp stays
  # finite (same stabilization as the log-sum-exp trick).
  max_log_sigma_sq = tf.reduce_max(log_sigma_sq, axis=0)
  log_sigma_sq -= max_log_sigma_sq
  # (sigma/sigma_0)**2
  sigma_sq = tf.math.exp(log_sigma_sq)
  # (<sigma**2>)/sigma_0**2 (<1)
  av_sigma_sq = tf.reduce_mean(sigma_sq, axis=0)
  # (<sigma**2> + var(mu))/sigma_0**2
  av_sigma_sq += var_mu * tf.math.exp(-max_log_sigma_sq)
  # log(<sigma**2> + var(mu))
  log_av_sigma_sq = tf.math.log(av_sigma_sq) + max_log_sigma_sq
  return av_mu, log_av_sigma_sq
def aggregate_regression_ensemble(logits_or_times, ensemble_size,
                                  use_uncertainty, test_time_ensembling):
  """Aggregate output of model ensemble.

  Args:
    logits_or_times: Tensor with the stacked per-model outputs, leading
      dimension ensemble_size * batch; channel 0 holds mu and (when
      `use_uncertainty`) the last channel holds log(sigma**2).
    ensemble_size: Number of models in the ensemble.
    use_uncertainty: Whether the models also predict a log-variance channel.
    test_time_ensembling: 'sum' to combine across models via
      `sum_average_transformed_mu_and_sigma`, 'none' to use the first model.

  Returns:
    Tuple (mu, log_sigma_sq); log_sigma_sq is None when `use_uncertainty`
    is False.

  Raises:
    ValueError: If `test_time_ensembling` is not 'sum' or 'none'.
  """
  out_shape = logits_or_times.shape.as_list()[1:]
  logits_or_times = tf.reshape(logits_or_times, [ensemble_size, -1] + out_shape)
  mus = logits_or_times[..., 0]
  log_sigma_sqs = logits_or_times[..., -1] if use_uncertainty else None
  if test_time_ensembling == 'sum':
    mu, log_sigma_sq = sum_average_transformed_mu_and_sigma(mus, log_sigma_sqs)
  elif test_time_ensembling == 'none':
    mu = mus[0]
    log_sigma_sq = log_sigma_sqs[0] if use_uncertainty else None
  else:
    raise ValueError('Unexpected test_time_ensembling')
  return mu, log_sigma_sq
def aggregate_classification_ensemble(logits_or_times, ensemble_size,
                                      test_time_ensembling):
  """Averages the output logits across models in the ensemble.

  Reshapes the stacked outputs to [ensemble_size, batch, ...] and either
  averages over the model axis ('sum') or returns them untouched ('none').
  Returns a (logits, None) tuple; raises ValueError for any other mode.
  """
  trailing_shape = logits_or_times.shape.as_list()[1:]
  per_model_logits = tf.reshape(
      logits_or_times, [ensemble_size, -1] + trailing_shape)
  if test_time_ensembling == 'sum':
    return tf.reduce_mean(per_model_logits, axis=0), None
  if test_time_ensembling == 'none':
    return per_model_logits, None
  raise ValueError('Unexpected test_time_ensembling')
def unpack_evaluator_output(data, return_seq_info=False, return_redshift=False):
  """Unpack evaluator.run_model_on_dataset output."""
  def _flat(values):
    # Every field is flattened into a 1-D numpy array.
    return np.array(values).flatten()

  outputs = [
      _flat(data[1]['mu']),
      _flat(data[1]['sigma']),
      _flat(data[1]['regression_targets']),
  ]
  if return_seq_info:
    raw_ids = _flat(data[2][0])
    seq_ids = np.array([raw_id.decode('UTF-8') for raw_id in raw_ids])
    time_idxs = _flat(data[2][1])
    axes = _flat(data[2][2])
    # Order is (seq_ids, axes, time_idxs) by convention of the callers.
    outputs += [seq_ids, axes, time_idxs]
  if return_redshift:
    outputs += [_flat(data[2][6])]
  return outputs
def process_data_into_myrs(redshifts, *data_lists):
  """Converts normalized time to virial time using Planck cosmology.

  Args:
    redshifts: Array of redshifts, aligned with the entries of each data list.
    *data_lists: Arrays of times expressed in units of the virial dynamical
      time at the corresponding redshift.

  Returns:
    A list with one array per input list, converted to gigayears.
  """
  # small hack to avoid build tools not recognizing non-standard trickery
  # done in the astropy library:
  # https://github.com/astropy/astropy/blob/master/astropy/cosmology/core.py#L3290
  # that dynamically generates and imports new classes.
  # Bug fix: the astropy attribute is spelled 'Planck13'; the previous
  # 'Plank13' spelling raised AttributeError at runtime.
  planck13 = getattr(cosmology, 'Planck13')
  hubble_constants = planck13.H(redshifts)  # (km/s)/megaparsec
  inv_hubble_constants = 1/hubble_constants  # (megaparsec*s) / km
  megaparsec_to_km = 1e19*3.1
  seconds_to_gigayears = 1e-15/31.556
  conversion_factor = megaparsec_to_km * seconds_to_gigayears
  hubble_time_gigayears = conversion_factor * inv_hubble_constants
  hubble_to_virial_time = 0.14  # approximate simulation-based conversion factor
  virial_dyn_time = hubble_to_virial_time*hubble_time_gigayears.value
  return [data_list*virial_dyn_time for data_list in data_lists]
def print_rmse_and_class_accuracy(mus, regression_targets, redshifts):
  """Convert to virial dynamical time and print stats.

  Prints the 95th-percentile absolute error and RMSE (in gigayears, after
  conversion via `process_data_into_myrs`) plus the accuracy of the implied
  binary classification (sign of the predicted time).
  """
  time_pred, time_gt = process_data_into_myrs(
      redshifts, mus, regression_targets)
  time_sq_errors = (time_pred-time_gt)**2
  rmse = np.sqrt(np.mean(time_sq_errors))
  # Positive time is treated as the positive class.
  labels = regression_targets > 0
  class_preds = mus > 0
  accuracy = sum((labels == class_preds).astype(np.int8)) / len(class_preds)
  print(f'95% Error: {np.percentile(np.sqrt(time_sq_errors), 95)}')
  print(f'RMSE: {rmse}')
  print(f'Classification Accuracy: {accuracy}')
def print_stats(vec, do_print=True):
  """Returns (count, min, mean, median, max) of `vec`, optionally printing.

  Args:
    vec: Array-like exposing `.flatten()`.
    do_print: Whether to also print the stats, space-separated, one line.
  """
  flat = vec.flatten()
  stats = (len(flat), min(flat), np.mean(flat), np.median(flat), max(flat))
  if do_print:
    print(*stats)
  return stats
def get_image_from_fits(base_dir, seq='475_31271', time='497', axis=2):
  """Read *.fits galaxy image from directory.

  Loads the 7 per-frequency FITS files for the given sequence/time/axis,
  sorted by the frequency encoded in the file name, and stacks them into
  an (H, W, 7) float32 array.
  """
  axis_map = {0: 'x', 1: 'y', 2: 'z'}
  fits_glob = f'{base_dir}/{seq}/fits_of_flux_psf/{time}/*_{axis_map[axis]}_*.fits'
  def get_freq_from_path(p):
    # Frequency is encoded in the third '_'-separated token of the file
    # name, after a one-character prefix.
    return int(p.split('/')[-1].split('_')[2][1:])
  fits_image_paths = sorted(glob.glob(fits_glob), key=get_freq_from_path)
  # Exactly one file per frequency band is expected.
  assert len(fits_image_paths) == 7
  combined_frequencies = []
  for fit_path in fits_image_paths:
    with open(fit_path, 'rb') as f:
      fits_data = np.array(fits.open(f)[0].data.astype(np.float32))
      combined_frequencies.append(fits_data)
  fits_image = np.transpose(np.array(combined_frequencies), (1, 2, 0))
  return fits_image
def stack_desired_galaxy_images(base_dir, seq, n_time_slices):
  """Search through galaxy image directory gathering images.

  Picks `n_time_slices` evenly spaced entries from the sequence's time
  directory listing and loads the axis-2 image for each.

  Returns:
    Tuple of (list of images, smallest first-dimension size among them).
  """
  fits_sequence_dir = os.path.join(base_dir, seq, 'fits_of_flux_psf')
  # NOTE(review): os.listdir order is unspecified; if chronological order
  # matters here, the listing should probably be sorted - confirm.
  all_times_for_seq = os.listdir(fits_sequence_dir)
  # Evenly spaced indices across the listing; assumes n_time_slices >= 2.
  hop = (len(all_times_for_seq)-1)//(n_time_slices-1)
  desired_time_idxs = [k*hop for k in range(n_time_slices)]
  all_imgs = []
  for j in desired_time_idxs:
    time = all_times_for_seq[j]
    img = get_image_from_fits(base_dir=base_dir, seq=seq, time=time, axis=2)
    all_imgs.append(img)
  min_img_size = min([img.shape[0] for img in all_imgs])
  return all_imgs, min_img_size
def draw_galaxy_image(image, target_size=None, color_map='viridis'):
  """Renders a single-channel galaxy image as a colormapped RGB PIL image.

  Args:
    image: 2D numpy array of flux values (max-normalized before colormapping).
    target_size: Optional (width, height) to resize the output to.
    color_map: Name of the matplotlib colormap to apply.

  Returns:
    A PIL.Image in RGB mode.
  """
  # `image.max()` is equivalent to `max(image.flatten())` but runs in C
  # instead of a Python-level scan over every pixel.
  normalized_image = image / image.max()
  color_map = plt.get_cmap(color_map)
  colored_image = color_map(normalized_image)[:, :, :3]
  colored_image = (colored_image * 255).astype(np.uint8)
  colored_image = Image.fromarray(colored_image, mode='RGB')
  if target_size:
    # Image.ANTIALIAS was an alias of LANCZOS and was removed in Pillow 10.
    colored_image = colored_image.resize(target_size, Image.LANCZOS)
  return colored_image
def collect_merger_sequence(ds, seq=b'370_11071', n_examples_to_sift=5000):
  """Collects images, targets and redshifts of one merger sequence.

  Scans up to roughly `n_examples_to_sift` examples of `ds` and keeps those
  whose sequence id matches `seq`.
  """
  images, targets, redshifts = [], [], []
  for i, all_inputs in enumerate(ds):
    # NOTE(review): assumes dataset layout [0]=image, [2]=regression target,
    # [4]=sequence id, [10]=redshift, with batch size 1 - confirm against
    # the dataset pipeline.
    if all_inputs[4][0].numpy() == seq:
      images.append(all_inputs[0][0].numpy())
      targets.append(all_inputs[2][0].numpy())
      redshifts.append(all_inputs[10][0].numpy())
    if i > n_examples_to_sift: break
  return np.squeeze(images), np.squeeze(targets), np.squeeze(redshifts)
def take_samples(sample_idxs, *data_lists):
  """Indexes every array in `data_lists` by `sample_idxs` along axis 0.

  Returns a list with one selected-sample array per input list.
  """
  return [np.take(entries, sample_idxs, axis=0) for entries in data_lists]
| deepmind/deepmind-research | galaxy_mergers/helpers.py | Python | apache-2.0 | 8,748 | [
"Galaxy",
"Gaussian"
] | 911e1b2e8b92f1812cbcb66447ee36fb24b7abaf9dfd0f0d6580fa1cce0eb85a |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import dask.array as da
import logging
import warnings
from scipy import ndimage
try:
# For scikit-image >= 0.17.0
from skimage.registration._phase_cross_correlation import _upsampled_dft
except ModuleNotFoundError:
from skimage.feature.register_translation import _upsampled_dft
from hyperspy.defaults_parser import preferences
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.math_tools import symmetrize, antisymmetrize, optimal_fft_size
from hyperspy.signal import BaseSignal
from hyperspy._signals.signal1d import Signal1D
from hyperspy._signals.lazy import LazySignal
from hyperspy._signals.common_signal2d import CommonSignal2D
from hyperspy.signal_tools import PeaksFinder2D
from hyperspy.docstrings.plot import (
BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS, PLOT2D_DOCSTRING,
PLOT2D_KWARGS_DOCSTRING)
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.utils.peakfinders2D import (
find_local_max, find_peaks_max, find_peaks_minmax, find_peaks_zaefferer,
find_peaks_stat, find_peaks_log, find_peaks_dog, find_peaks_xc)
_logger = logging.getLogger(__name__)
def shift_image(im, shift=0, interpolation_order=1, fill_value=np.nan):
    """Shift an image by ``shift`` pixels via :py:func:`scipy.ndimage.shift`.

    Returns ``im`` itself when the shift is all-zero. Purely integral
    shifts use order 0 (no spline interpolation); sub-pixel shifts use
    the requested ``interpolation_order``. Regions shifted in from the
    outside are filled with ``fill_value``.
    """
    if not np.any(shift):
        return im
    fractional_part, _ = np.modf(shift)
    # Spline interpolation is only needed when the shift has a
    # fractional component.
    order = interpolation_order if fractional_part.any() else 0
    return ndimage.shift(im, shift, cval=fill_value, order=order)
def triu_indices_minus_diag(n):
    """Returns the indices for the upper-triangle of an (n, n) array
    excluding its diagonal

    Parameters
    ----------
    n : int
        The length of the square array
    """
    rows, cols = np.triu_indices(n)
    off_diagonal = rows != cols
    return rows[off_diagonal], cols[off_diagonal]
def hanning2d(M, N):
    """
    A 2D hanning window of shape (M, N), the outer product of the 1D
    Hanning windows of each axis.
    """
    row_window = np.hanning(M)
    col_window = np.hanning(N)
    return row_window[:, np.newaxis] * col_window[np.newaxis, :]
def sobel_filter(im):
    """Returns the Sobel gradient magnitude of ``im``.

    Gradients along both axes are computed with 'constant' boundary mode
    and combined as their Euclidean norm.
    """
    gradient_x = ndimage.sobel(im, axis=0, mode='constant')
    gradient_y = ndimage.sobel(im, axis=1, mode='constant')
    return np.hypot(gradient_x, gradient_y)
def fft_correlation(in1, in2, normalize=False, real_only=False):
    """Correlation of two N-dimensional arrays using FFT.

    Adapted from scipy's fftconvolve.

    Parameters
    ----------
    in1, in2 : array
        Input arrays to convolve.
    normalize: bool, default False
        If True performs phase correlation.
    real_only : bool, default False
        If True, and in1 and in2 are real-valued inputs, uses
        rfft instead of fft for approx. 2x speed-up.

    Returns
    -------
    ret : array
        Real part of the inverse-transformed correlation.
    fprod : array
        Product of the two forward FFTs (the correlation in the
        frequency domain).
    """
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    size = s1 + s2 - 1
    # Calculate optimal FFT size
    complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')
    fsize = [optimal_fft_size(a, not complex_result) for a in size]
    # For real-valued inputs, rfftn is ~2x faster than fftn
    if not complex_result and real_only:
        fft_f, ifft_f = np.fft.rfftn, np.fft.irfftn
    else:
        fft_f, ifft_f = np.fft.fftn, np.fft.ifftn
    fprod = fft_f(in1, fsize)
    fprod *= fft_f(in2, fsize).conjugate()
    if normalize is True:
        # Phase correlation: discard magnitudes, keep only phase
        fprod = np.nan_to_num(fprod / np.absolute(fprod))
    ret = ifft_f(fprod).real.copy()
    return ret, fprod
def estimate_image_shift(ref, image, roi=None, sobel=True,
                         medfilter=True, hanning=True, plot=False,
                         dtype='float', normalize_corr=False,
                         sub_pixel_factor=1,
                         return_maxval=True):
    """Estimate the shift in a image using phase correlation

    This method can only estimate the shift by comparing
    bidimensional features that should not change the position
    in the given axis. To decrease the memory usage, the time of
    computation and the accuracy of the results it is convenient
    to select a region of interest by setting the roi keyword.

    Parameters
    ----------
    ref : 2D numpy.ndarray
        Reference image
    image : 2D numpy.ndarray
        Image to register
    roi : tuple of ints (top, bottom, left, right)
        Define the region of interest
    sobel : bool
        apply a sobel filter for edge enhancement
    medfilter : bool
        apply a median filter for noise reduction
    hanning : bool
        Apply a 2d hanning filter
    plot : bool or matplotlib.Figure
        If True, plots the images after applying the filters and the phase
        correlation. If a figure instance, the images will be plotted to the
        given figure.
    dtype : str or dtype
        Typecode or data-type in which the calculations must be
        performed.
    normalize_corr : bool
        If True use phase correlation instead of standard correlation
    sub_pixel_factor : float
        Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor parts
        of a pixel. Default is 1, i.e. no sub-pixel accuracy.
    return_maxval : bool
        If True, also return the maximum value of the correlation.

    Returns
    -------
    shifts: np.array
        containing the estimate shifts
    max_value : float
        The maximum value of the correlation

    Notes
    -----
    The statistical analysis approach to the translation estimation
    when using reference='stat' roughly follows [*]_ . If you use
    it please cite their article.

    References
    ----------
    .. [*] Bernhard Schaffer, Werner Grogger and Gerald Kothleitner.
       “Automated Spatial Drift Correction for EFTEM Image Series.”
       Ultramicroscopy 102, no. 1 (December 2004): 27–36.
    """
    ref, image = da.compute(ref, image)
    # Make a copy of the images to avoid modifying them
    ref = ref.copy().astype(dtype)
    image = image.copy().astype(dtype)
    if roi is not None:
        top, bottom, left, right = roi
    else:
        top, bottom, left, right = [None, ] * 4
    # Select region of interest
    ref = ref[top:bottom, left:right]
    image = image[top:bottom, left:right]
    # Apply filters
    for im in (ref, image):
        if hanning is True:
            im *= hanning2d(*im.shape)
        if medfilter is True:
            # This is faster than sp.signal.med_filt,
            # which was the previous implementation.
            # The size is fixed at 3 to be consistent
            # with the previous implementation.
            im[:] = ndimage.median_filter(im, size=3)
        if sobel is True:
            im[:] = sobel_filter(im)
    # If sub-pixel alignment not being done, use faster real-valued fft
    real_only = (sub_pixel_factor == 1)
    phase_correlation, image_product = fft_correlation(
        ref, image, normalize=normalize_corr, real_only=real_only)
    # Estimate the shift by getting the coordinates of the maximum
    argmax = np.unravel_index(np.argmax(phase_correlation),
                              phase_correlation.shape)
    # Wrap maxima past the half-size back to negative shifts
    threshold = (phase_correlation.shape[0] / 2 - 1,
                 phase_correlation.shape[1] / 2 - 1)
    shift0 = argmax[0] if argmax[0] < threshold[0] else \
        argmax[0] - phase_correlation.shape[0]
    shift1 = argmax[1] if argmax[1] < threshold[1] else \
        argmax[1] - phase_correlation.shape[1]
    max_val = phase_correlation.real.max()
    shifts = np.array((shift0, shift1))
    # The following code is more or less copied from
    # skimage.feature.register_feature, to gain access to the maximum value:
    if sub_pixel_factor != 1:
        # Initial shift estimate in upsampled grid
        shifts = np.round(shifts * sub_pixel_factor) / sub_pixel_factor
        upsampled_region_size = np.ceil(sub_pixel_factor * 1.5)
        # Center of output array at dftshift + 1
        dftshift = np.fix(upsampled_region_size / 2.0)
        sub_pixel_factor = np.array(sub_pixel_factor, dtype=np.float64)
        normalization = (image_product.size * sub_pixel_factor ** 2)
        # Matrix multiply DFT around the current shift estimate
        sample_region_offset = dftshift - shifts * sub_pixel_factor
        correlation = _upsampled_dft(image_product.conj(),
                                     upsampled_region_size,
                                     sub_pixel_factor,
                                     sample_region_offset).conj()
        correlation /= normalization
        # Locate maximum and map back to original pixel grid
        maxima = np.array(np.unravel_index(
            np.argmax(np.abs(correlation)),
            correlation.shape),
            dtype=np.float64)
        maxima -= dftshift
        shifts = shifts + maxima / sub_pixel_factor
        max_val = correlation.real.max()
    # Plot on demand
    if plot is True or isinstance(plot, plt.Figure):
        if isinstance(plot, plt.Figure):
            fig = plot
            axarr = plot.axes
            if len(axarr) < 3:
                for i in range(3):
                    fig.add_subplot(1, 3, i + 1)
                axarr = fig.axes
        else:
            fig, axarr = plt.subplots(1, 3)
        full_plot = len(axarr[0].images) == 0
        if full_plot:
            axarr[0].set_title('Reference')
            axarr[1].set_title('Image')
            axarr[2].set_title('Phase correlation')
            axarr[0].imshow(ref)
            axarr[1].imshow(image)
            d = (np.array(phase_correlation.shape) - 1) // 2
            extent = [-d[1], d[1], -d[0], d[0]]
            axarr[2].imshow(np.fft.fftshift(phase_correlation),
                            extent=extent)
            plt.show()
        else:
            axarr[0].images[0].set_data(ref)
            axarr[1].images[0].set_data(image)
            axarr[2].images[0].set_data(np.fft.fftshift(phase_correlation))
            # TODO: Renormalize images
            fig.canvas.draw_idle()
    # Liberate the memory. It is specially necessary if it is a
    # memory map
    del ref
    del image
    if return_maxval:
        return -shifts, max_val
    else:
        return -shifts
class Signal2D(BaseSignal, CommonSignal2D):
"""
"""
_signal_dimension = 2
_lazy = False
    def __init__(self, *args, **kwargs):
        """Create a Signal2D, forcing the signal dimension to two.

        Raises
        ------
        ValueError
            If ``ragged=True`` is requested; 2D signals can't be ragged.
        """
        if kwargs.get('ragged', False):
            raise ValueError("Signal2D can't be ragged.")
        super().__init__(*args, **kwargs)
        if self.axes_manager.signal_dimension != 2:
            self.axes_manager.set_signal_dimension(2)
    def plot(self,
             navigator="auto",
             plot_markers=True,
             autoscale='v',
             saturated_pixels=None,
             norm="auto",
             vmin=None,
             vmax=None,
             gamma=1.0,
             linthresh=0.01,
             linscale=0.1,
             scalebar=True,
             scalebar_color="white",
             axes_ticks=None,
             axes_off=False,
             axes_manager=None,
             no_nans=False,
             colorbar=True,
             centre_colormap="auto",
             min_aspect=0.1,
             # NOTE(review): mutable default argument; safe only if it is
             # never mutated downstream - confirm.
             navigator_kwds={},
             **kwargs
             ):
        """%s
        %s
        %s
        %s
        """
        # Fail early on invalid autoscale flags before delegating to
        # BaseSignal.plot.
        for c in autoscale:
            if c not in ['x', 'y', 'v']:
                raise ValueError("`autoscale` only accepts 'x', 'y', 'v' as "
                                 "valid characters.")
        super().plot(
            navigator=navigator,
            plot_markers=plot_markers,
            autoscale=autoscale,
            saturated_pixels=saturated_pixels,
            norm=norm,
            vmin=vmin,
            vmax=vmax,
            gamma=gamma,
            linthresh=linthresh,
            linscale=linscale,
            scalebar=scalebar,
            scalebar_color=scalebar_color,
            axes_ticks=axes_ticks,
            axes_off=axes_off,
            axes_manager=axes_manager,
            no_nans=no_nans,
            colorbar=colorbar,
            centre_colormap=centre_colormap,
            min_aspect=min_aspect,
            navigator_kwds=navigator_kwds,
            **kwargs
        )
    # Fill the %s placeholders with the shared plot documentation.
    plot.__doc__ %= (BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS,
                     PLOT2D_DOCSTRING, PLOT2D_KWARGS_DOCSTRING)
    def create_model(self, dictionary=None):
        """Create a model for the current signal

        Parameters
        ----------
        dictionary : {None, dict}, optional
            A dictionary to be used to recreate a model. Usually generated
            using :meth:`hyperspy.model.as_dictionary`

        Returns
        -------
        A Model2D instance bound to this signal.
        """
        # NOTE(review): local import - presumably to avoid a circular
        # dependency at module load time.
        from hyperspy.models.model2d import Model2D
        return Model2D(self, dictionary=dictionary)
    def estimate_shift2D(self,
                         reference='current',
                         correlation_threshold=None,
                         chunk_size=30,
                         roi=None,
                         normalize_corr=False,
                         sobel=True,
                         medfilter=True,
                         hanning=True,
                         plot=False,
                         dtype='float',
                         show_progressbar=None,
                         sub_pixel_factor=1):
        """Estimate the shifts in an image using phase correlation.

        This method can only estimate the shift by comparing
        bi-dimensional features that should not change position
        between frames. To decrease the memory usage, the time of
        computation and the accuracy of the results it is convenient
        to select a region of interest by setting the ``roi`` argument.

        Parameters
        ----------
        reference : {'current', 'cascade' ,'stat'}
            If 'current' (default) the image at the current
            coordinates is taken as reference. If 'cascade' each image
            is aligned with the previous one. If 'stat' the translation
            of every image with all the rest is estimated and by
            performing statistical analysis on the result the
            translation is estimated.
        correlation_threshold : {None, 'auto', float}
            This parameter is only relevant when reference='stat'.
            If float, the shift estimations with a maximum correlation
            value lower than the given value are not used to compute
            the estimated shifts. If 'auto' the threshold is calculated
            automatically as the minimum maximum correlation value
            of the automatically selected reference image.
        chunk_size : {None, int}
            If int and reference='stat' the number of images used
            as reference are limited to the given value.
        roi : tuple of ints or floats (left, right, top, bottom)
            Define the region of interest. If int(float) the position
            is given axis index(value). Note that ROIs can be used
            in place of a tuple.
        normalize_corr : bool, default False
            If True, use phase correlation to align the images, otherwise
            use cross correlation.
        sobel : bool, default True
            Apply a Sobel filter for edge enhancement
        medfilter : bool, default True
            Apply a median filter for noise reduction
        hanning : bool, default True
            Apply a 2D hanning filter
        plot : bool or 'reuse'
            If True plots the images after applying the filters and
            the phase correlation. If 'reuse', it will also plot the images,
            but it will only use one figure, and continuously update the images
            in that figure as it progresses through the stack.
        dtype : str or dtype
            Typecode or data-type in which the calculations must be
            performed.
        %s
        sub_pixel_factor : float
            Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor
            parts of a pixel. Default is 1, i.e. no sub-pixel accuracy.

        Returns
        -------
        shifts : list of array
            List of estimated shifts

        Notes
        -----
        The statistical analysis approach to the translation estimation
        when using ``reference='stat'`` roughly follows [Schaffer2004]_.
        If you use it please cite their article.

        References
        ----------
        .. [Schaffer2004] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner.
           “Automated Spatial Drift Correction for EFTEM Image Series.”
           Ultramicroscopy 102, no. 1 (December 2004): 27–36.

        See Also
        --------
        * :py:meth:`~._signals.signal2d.Signal2D.align2D`

        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_two()
        if roi is not None:
            # Get the indices of the roi
            yaxis = self.axes_manager.signal_axes[1]
            xaxis = self.axes_manager.signal_axes[0]
            roi = tuple([xaxis._get_index(i) for i in roi[2:]] +
                        [yaxis._get_index(i) for i in roi[:2]])

        ref = None if reference == 'cascade' else \
            self.__call__().copy()
        shifts = []
        nrows = None
        images_number = self.axes_manager._max_index + 1
        if plot == 'reuse':
            # Reuse figure for plots
            plot = plt.figure()
        if reference == 'stat':
            # pcarray[i, j] records the correlation maximum and shift
            # between images i and j (upper triangle filled below).
            nrows = images_number if chunk_size is None else \
                min(images_number, chunk_size)
            pcarray = ma.zeros((nrows, self.axes_manager._max_index + 1,
                                ),
                               dtype=np.dtype([('max_value', float),
                                               ('shift', np.int32,
                                                (2,))]))
            nshift, max_value = estimate_image_shift(
                self(),
                self(),
                roi=roi,
                sobel=sobel,
                medfilter=medfilter,
                hanning=hanning,
                normalize_corr=normalize_corr,
                plot=plot,
                dtype=dtype,
                sub_pixel_factor=sub_pixel_factor)
            np.fill_diagonal(pcarray['max_value'], max_value)
            pbar_max = nrows * images_number
        else:
            pbar_max = images_number

        # Main iteration loop. Fills the rows of pcarray when reference
        # is stat
        with progressbar(total=pbar_max,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i1, im in enumerate(self._iterate_signal()):
                if reference in ['current', 'cascade']:
                    if ref is None:
                        ref = im.copy()
                        shift = np.array([0., 0.])
                    nshift, max_val = estimate_image_shift(
                        ref, im, roi=roi, sobel=sobel, medfilter=medfilter,
                        hanning=hanning, plot=plot,
                        normalize_corr=normalize_corr, dtype=dtype,
                        sub_pixel_factor=sub_pixel_factor)
                    if reference == 'cascade':
                        # Accumulate shifts relative to the first frame
                        shift += nshift
                        ref = im.copy()
                    else:
                        shift = nshift
                    shifts.append(shift.copy())
                    pbar.update(1)
                elif reference == 'stat':
                    if i1 == nrows:
                        break
                    # Iterate to fill the columns of pcarray
                    for i2, im2 in enumerate(
                            self._iterate_signal()):
                        if i2 > i1:
                            nshift, max_value = estimate_image_shift(
                                im,
                                im2,
                                roi=roi,
                                sobel=sobel,
                                medfilter=medfilter,
                                hanning=hanning,
                                normalize_corr=normalize_corr,
                                plot=plot,
                                dtype=dtype,
                                sub_pixel_factor=sub_pixel_factor)
                            pcarray[i1, i2] = max_value, nshift
                        del im2
                        pbar.update(1)
                    del im
        if reference == 'stat':
            # Select the reference image as the one that has the
            # higher max_value in the row
            sqpcarr = pcarray[:, :nrows]
            sqpcarr['max_value'][:] = symmetrize(sqpcarr['max_value'])
            sqpcarr['shift'][:] = antisymmetrize(sqpcarr['shift'])
            ref_index = np.argmax(pcarray['max_value'].min(1))
            self.ref_index = ref_index
            shifts = (pcarray['shift'] +
                      pcarray['shift'][ref_index, :nrows][:, np.newaxis])
            if correlation_threshold is not None:
                if correlation_threshold == 'auto':
                    correlation_threshold = \
                        (pcarray['max_value'].min(0)).max()
                    _logger.info("Correlation threshold = %1.2f",
                                 correlation_threshold)
                # Mask out low-confidence estimates, but always keep
                # the reference row
                shifts[pcarray['max_value'] <
                       correlation_threshold] = ma.masked
                shifts.mask[ref_index, :] = False

            shifts = shifts.mean(0)
        else:
            shifts = np.array(shifts)
            del ref
        return shifts
    estimate_shift2D.__doc__ %= SHOW_PROGRESSBAR_ARG
def align2D(
    self,
    crop=True,
    fill_value=np.nan,
    shifts=None,
    expand=False,
    interpolation_order=1,
    show_progressbar=None,
    parallel=None,
    max_workers=None,
    **kwargs,
):
    """Align the images in-place using :py:func:`scipy.ndimage.shift`.

    The images can be aligned using either user-provided shifts or
    by first estimating the shifts.

    See :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`
    for more details on estimating image shifts.

    Parameters
    ----------
    crop : bool
        If True, the data will be cropped not to include regions
        with missing data
    fill_value : int, float, nan
        The areas with missing data are filled with the given value.
        Default is nan.
    shifts : None or list of tuples
        If None the shifts are estimated using
        :py:meth:`~._signals.signal2D.estimate_shift2D`.
    expand : bool
        If True, the data will be expanded to fit all data after alignment.
        Overrides `crop`.
    interpolation_order: int, default 1.
        The order of the spline interpolation. Default is 1, linear
        interpolation.
    %s
    %s
    %s
    **kwargs :
        Keyword arguments passed to :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`

    Returns
    -------
    shifts : np.array
        The estimated shifts are returned only if ``shifts`` is None

    Raises
    ------
    NotImplementedError
        If one of the signal axes is a non-uniform axis.

    See Also
    --------
    * :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`

    """
    self._check_signal_dimension_equals_two()
    # Interpolation-based alignment is only well-defined on uniform axes.
    for _axis in self.axes_manager.signal_axes:
        if not _axis.is_uniform:
            raise NotImplementedError(
                "This operation is not implememented for non-uniform axes")
    return_shifts = False
    if shifts is None:
        # No shifts supplied by the caller: estimate them from the data.
        shifts = self.estimate_shift2D(**kwargs)
        return_shifts = True
        if not np.any(shifts):
            warnings.warn(
                "The estimated shifts are all zero, suggesting "
                "the images are already aligned",
                UserWarning,
            )
            return shifts
    elif not np.any(shifts):
        warnings.warn(
            "The provided shifts are all zero, no alignment done",
            UserWarning,
        )
        return None
    if isinstance(shifts, np.ndarray):
        # Negated: scipy.ndimage.shift translates content by +shift,
        # while `shifts` records how far each image has drifted.
        signal_shifts = Signal1D(-shifts)
    else:
        signal_shifts = shifts
    if expand:
        # Expand to fit all valid data
        # Extreme negative/positive shifts per axis determine how much
        # padding each side needs (0 where no shift points that way).
        # Note: `left` and `top` are <= 0 here, hence the negations below.
        left, right = (
            int(np.floor(signal_shifts.isig[1].min().data)) if signal_shifts.isig[1].min().data < 0 else 0,
            int(np.ceil(signal_shifts.isig[1].max().data)) if signal_shifts.isig[1].max().data > 0 else 0,
        )
        top, bottom = (
            int(np.ceil(signal_shifts.isig[0].min().data)) if signal_shifts.isig[0].min().data < 0 else 0,
            int(np.floor(signal_shifts.isig[0].max().data)) if signal_shifts.isig[0].max().data > 0 else 0,
        )
        xaxis = self.axes_manager.signal_axes[0]
        yaxis = self.axes_manager.signal_axes[1]
        padding = []
        # One (before, after) pad pair per array axis; only the two
        # signal axes receive non-zero padding.
        for i in range(self.data.ndim):
            if i == xaxis.index_in_array:
                padding.append((-left, right))
            elif i == yaxis.index_in_array:
                padding.append((-top, bottom))
            else:
                padding.append((0, 0))
        self.data = np.pad(
            self.data, padding, mode="constant", constant_values=(fill_value,)
        )
        # Keep the axes' calibration consistent with the padded data.
        if left < 0:
            xaxis.offset += left * xaxis.scale
        if np.any((left < 0, right > 0)):
            xaxis.size += right - left
        if top < 0:
            yaxis.offset += top * yaxis.scale
        if np.any((top < 0, bottom > 0)):
            yaxis.size += bottom - top
    # Translate, with sub-pixel precision if necessary,
    # note that we operate in-place here
    self.map(
        shift_image,
        shift=signal_shifts,
        show_progressbar=show_progressbar,
        parallel=parallel,
        max_workers=max_workers,
        ragged=False,
        inplace=True,
        fill_value=fill_value,
        interpolation_order=interpolation_order,
    )
    if crop and not expand:
        # Largest relative displacement between any two images; if it
        # exceeds the signal extent there would be nothing left to keep.
        max_shift = signal_shifts.max() - signal_shifts.min()
        if np.any(max_shift.data >= np.array(self.axes_manager.signal_shape)):
            raise ValueError("Shift outside range of signal axes. Cannot crop signal." +
                             "Max shift:" + str(max_shift.data) + " shape" + str(self.axes_manager.signal_shape))
        # Crop the image to the valid size
        # NOTE(review): the `shifts = -shifts` pair below has no net
        # effect -- `shifts` is not read between the two negations
        # (cropping uses `signal_shifts`); also `-shifts` would raise
        # TypeError if a plain list of tuples was provided. Possibly
        # vestigial -- verify before removing.
        shifts = -shifts
        bottom, top = (
            int(np.floor(signal_shifts.isig[0].min().data)) if signal_shifts.isig[0].min().data < 0 else None,
            int(np.ceil(signal_shifts.isig[0].max().data)) if signal_shifts.isig[0].max().data > 0 else 0,
        )
        right, left = (
            int(np.floor(signal_shifts.isig[1].min().data)) if signal_shifts.isig[1].min().data < 0 else None,
            int(np.ceil(signal_shifts.isig[1].max().data)) if signal_shifts.isig[1].max().data > 0 else 0,
        )
        self.crop_image(top, bottom, left, right)
        shifts = -shifts
    self.events.data_changed.trigger(obj=self)
    if return_shifts:
        return shifts
align2D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def crop_image(self, top=None, bottom=None,
               left=None, right=None, convert_units=False):
    """Crop an image in place.

    Parameters
    ----------
    top, bottom, left, right : {int | float}
        If int the values are taken as indices. If float the values are
        converted to indices.
    convert_units : bool
        Default is False
        If True, convert the signal units using the 'convert_to_units'
        method of the `axes_manager`. If False, does nothing.

    See also
    --------
    crop
    """
    self._check_signal_dimension_equals_two()
    y_axis = self.axes_manager.signal_axes[1]
    x_axis = self.axes_manager.signal_axes[0]
    # Vertical crop first (rows), then horizontal (columns).
    self.crop(y_axis.index_in_axes_manager, top, bottom)
    self.crop(x_axis.index_in_axes_manager, left, right)
    if convert_units:
        self.axes_manager.convert_units('signal')
def add_ramp(self, ramp_x, ramp_y, offset=0):
    """Add a linear ramp to the signal, in place.

    Parameters
    ----------
    ramp_x: float
        Slope of the ramp in x-direction.
    ramp_y: float
        Slope of the ramp in y-direction.
    offset: float, optional
        Offset of the ramp at the signal fulcrum.

    Notes
    -----
    The fulcrum of the linear ramp is at the origin and the slopes are
    given in units of the axis with the according scale taken into
    account. Both are available via the `axes_manager` of the signal.
    """
    row_idx, col_idx = np.indices(self.axes_manager._signal_shape_in_array)
    if self._lazy:
        import dask.array as da
        ones = da.ones(self.data.shape, dtype=self.data.dtype,
                       chunks=self.data.chunks)
    else:
        ones = np.ones(self.data.shape, dtype=self.data.dtype)
    ramp = offset * ones
    # Accumulate in place so the dtype chosen by `offset * ones` is kept.
    ramp += ramp_x * col_idx
    ramp += ramp_y * row_idx
    self.data += ramp
def find_peaks(self, method='local_max', interactive=True,
               current_index=False, show_progressbar=None,
               parallel=None, max_workers=None, display=True, toolkit=None,
               **kwargs):
    """Find peaks in a 2D signal.

    Function to locate the positive peaks in an image using various, user
    specified, methods. Returns a structured array containing the peak
    positions.

    Parameters
    ----------
    method : str
        Select peak finding algorithm to implement. Available methods
        are:

        * 'local_max' - simple local maximum search using the
          :py:func:`skimage.feature.peak_local_max` function
        * 'max' - simple local maximum search using the
          :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_max`.
        * 'minmax' - finds peaks by comparing maximum filter results
          with minimum filter, calculates centers of mass. See the
          :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_minmax`
          function.
        * 'zaefferer' - based on gradient thresholding and refinement
          by local region of interest optimisation. See the
          :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_zaefferer`
          function.
        * 'stat' - based on statistical refinement and difference with
          respect to mean intensity. See the
          :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_stat`
          function.
        * 'laplacian_of_gaussian' - a blob finder using the laplacian of
          Gaussian matrices approach. See the
          :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_log`
          function.
        * 'difference_of_gaussian' - a blob finder using the difference
          of Gaussian matrices approach. See the
          :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_log`
          function.
        * 'template_matching' - A cross correlation peakfinder. This
          method requires providing a template with the ``template``
          parameter, which is used as reference pattern to perform the
          template matching to the signal. It uses the
          :py:func:`skimage.feature.match_template` function and the peaks
          position are obtained by using `minmax` method on the
          template matching result.
    interactive : bool
        If True, the method parameter can be adjusted interactively.
        If False, the results will be returned.
    current_index : bool
        if True, the computation will be performed for the current index.
    %s
    %s
    %s
    %s
    %s
    **kwargs : dict
        Keywords parameters associated with above methods, see the
        documentation of each method for more details.

    Notes
    -----
    As a convenience, the 'local_max' method accepts the 'distance' and
    'threshold' argument, which will be map to the 'min_distance' and
    'threshold_abs' of the :py:func:`skimage.feature.peak_local_max`
    function.

    Returns
    -------
    peaks : :py:class:`~hyperspy.signal.BaseSignal` or numpy.ndarray if current_index=True
        Array of shape `_navigation_shape_in_array` in which each cell
        contains an array with dimensions (npeaks, 2) that contains
        the `x, y` pixel coordinates of peaks found in each image sorted
        first along `y` and then along `x`.
    """
    # Name -> implementation lookup for the supported algorithms.
    method_dict = {
        'local_max': find_local_max,
        'max': find_peaks_max,
        'minmax': find_peaks_minmax,
        'zaefferer': find_peaks_zaefferer,
        'stat': find_peaks_stat,
        'laplacian_of_gaussian': find_peaks_log,
        'difference_of_gaussian': find_peaks_dog,
        'template_matching' : find_peaks_xc,
    }
    # As a convenience, we map 'distance' to 'min_distance' and
    # 'threshold' to 'threshold_abs' when using the 'local_max' method to
    # match with the arguments of skimage.feature.peak_local_max.
    if method == 'local_max':
        if 'distance' in kwargs.keys():
            kwargs['min_distance'] = kwargs.pop('distance')
        if 'threshold' in kwargs.keys():
            kwargs['threshold_abs'] = kwargs.pop('threshold')
    if method in method_dict.keys():
        method_func = method_dict[method]
    else:
        raise NotImplementedError(f"The method `{method}` is not "
                                  "implemented. See documentation for "
                                  "available implementations.")
    if interactive:
        # Create a peaks signal with the same navigation shape as a
        # placeholder for the output
        axes_dict = self.axes_manager._get_axes_dicts(
            self.axes_manager.navigation_axes)
        peaks = BaseSignal(np.empty(self.axes_manager.navigation_shape),
                           axes=axes_dict)
        # Launch the interactive tool; `peaks` is handed over so it can
        # be populated -- presumably by PeaksFinder2D while the user
        # tunes parameters (see that class for details).
        pf2D = PeaksFinder2D(self, method=method, peaks=peaks, **kwargs)
        pf2D.gui(display=display, toolkit=toolkit)
    elif current_index:
        # Only the image at the current navigation index.
        peaks = method_func(self.__call__(), **kwargs)
    else:
        # Apply the peak finder to every image in the navigation space.
        peaks = self.map(method_func, show_progressbar=show_progressbar,
                         parallel=parallel, inplace=False, ragged=True,
                         max_workers=max_workers, **kwargs)
        if peaks._lazy:
            peaks.compute()
    return peaks
find_peaks.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
                       DISPLAY_DT, TOOLKIT_DT)
class LazySignal2D(LazySignal, Signal2D):
    """Lazy version of :py:class:`Signal2D` (see the ``LazySignal`` base)."""

    # Marks instances as lazily evaluated; read e.g. by ``add_ramp``.
    _lazy = True
| ericpre/hyperspy | hyperspy/_signals/signal2d.py | Python | gpl-3.0 | 36,718 | [
"Gaussian"
] | 7a1b1a99131b43fb3e2b98755cc3d9159f5851ebc686bc60f3a7ae7496ac405c |
"""
Augmenters that perform simple arithmetic changes.
List of augmenters:
* :class:`Add`
* :class:`AddElementwise`
* :class:`AdditiveGaussianNoise`
* :class:`AdditiveLaplaceNoise`
* :class:`AdditivePoissonNoise`
* :class:`Multiply`
* :class:`MultiplyElementwise`
* :class:`Cutout`
* :class:`Dropout`
* :class:`CoarseDropout`
* :class:`Dropout2d`
* :class:`TotalDropout`
* :class:`ReplaceElementwise`
* :class:`ImpulseNoise`
* :class:`SaltAndPepper`
* :class:`CoarseSaltAndPepper`
* :class:`Salt`
* :class:`CoarseSalt`
* :class:`Pepper`
* :class:`CoarsePepper`
* :class:`Invert`
* :class:`Solarize`
* :class:`ContrastNormalization`
* :class:`JpegCompression`
"""
from __future__ import print_function, division, absolute_import
import tempfile
import imageio
import numpy as np
import cv2
import imgaug as ia
from . import meta
from .. import parameters as iap
from .. import dtypes as iadt
from .. import random as iarandom
from ..imgaug import _normalize_cv2_input_arr_
# fill modes for apply_cutout_() and Cutout augmenter
# contains roughly:
#   'str fill_mode_name => (str module_name, str function_name)'
# i.e. the target function is referenced by name and resolved lazily.
# We could also assign the function to each fill mode name instead of its
# name, but that has the disadvantage that these aren't defined yet (they
# are defined further below) and that during unittesting they would be harder
# to mock. (mock.patch() seems to not automatically replace functions
# assigned in that way.)
_CUTOUT_FILL_MODES = {
    "constant": ("imgaug.augmenters.arithmetic", "_fill_rectangle_constant_"),
    "gaussian": ("imgaug.augmenters.arithmetic", "_fill_rectangle_gaussian_")
}
def add_scalar(image, value):
    """Add a scalar value (or one scalar per channel) to an image.

    Out-of-place variant of :func:`add_scalar_`; the input image is left
    untouched. ``uint8`` inputs are protected against overflow during
    the addition.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.add_scalar_`.

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``. If `value` contains more
        than one value, the shape is expected to be ``(H,W,C)``.

    value : number or ndarray
        The value to add to the image. Either a single value or an
        array containing exactly one component per channel, i.e. ``C``
        components.

    Returns
    -------
    ndarray
        A copy of `image` with `value` added to it.

    """
    image_copy = np.copy(image)
    return add_scalar_(image_copy, value)
def add_scalar_(image, value):
    """Add in-place a scalar value (or one scalar per channel) to an image.

    ``uint8`` inputs are protected against overflow during the addition.

    **Supported dtypes**:

    * ``uint8``: yes; fully tested
    * ``uint16``: limited; tested (1)
    * ``uint32``: no
    * ``uint64``: no
    * ``int8``: limited; tested (1)
    * ``int16``: limited; tested (1)
    * ``int32``: no
    * ``int64``: no
    * ``float16``: limited; tested (1)
    * ``float32``: limited; tested (1)
    * ``float64``: no
    * ``float128``: no
    * ``bool``: limited; tested (1)

    - (1) Non-uint8 dtypes can overflow. For floats, this can result
      in +/-inf.

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``. If `value` contains more
        than one value, the shape is expected to be ``(H,W,C)``.
        The image might be changed in-place.

    value : number or ndarray
        The value to add to the image. Either a single value or an
        array containing exactly one component per channel, i.e. ``C``
        components.

    Returns
    -------
    ndarray
        Image with value added to it. This might be the input `image`,
        changed in-place.

    """
    if image.size == 0:
        return np.copy(image)
    # Refuse dtypes for which overflow-safe addition is not implemented.
    iadt.gate_dtypes_strs(
        {image.dtype},
        allowed="bool uint8 uint16 int8 int16 float16 float32",
        disallowed="uint32 uint64 int32 int64 float64 float128",
        augmenter=None)
    handler = (
        _add_scalar_to_uint8_
        if image.dtype == iadt._UINT8_DTYPE
        else _add_scalar_to_non_uint8)
    return handler(image, value)
def _add_scalar_to_uint8_(image, value):
    # cv2-based in-place scalar addition for uint8 images.
    # Normalize `value`: floats are rounded because the uint8 output is
    # integral; `is_single_value` tracks scalar vs. per-channel input.
    if ia.is_single_number(value):
        is_single_value = True
        value = round(value)
    elif ia.is_np_scalar(value) or ia.is_np_array(value):
        is_single_value = (value.size == 1)
        value = np.round(value) if value.dtype.kind == "f" else value
    else:
        is_single_value = False
    is_channelwise = not is_single_value
    # Fast path: 2D image plus one scalar -- cv2.add handles it directly,
    # saturating at [0, 255] and writing into `image` via dst=.
    if image.ndim == 2 and is_single_value:
        return cv2.add(image, value, dst=image, dtype=cv2.CV_8U)
    input_shape = image.shape
    # Flatten so a single cv2.add call covers any channel count.
    image = image.ravel()
    values = np.array(value)
    if not is_channelwise:
        # One scalar for all pixels and channels.
        values = np.broadcast_to(values, image.shape)
    else:
        # One value per channel: repeat the per-channel pattern across
        # every pixel of the flattened image.
        values = np.tile(values, image.size // len(values))
    image_add = cv2.add(image, values, dst=image, dtype=cv2.CV_8U)
    return image_add.reshape(input_shape)
def _add_scalar_to_non_uint8(image, value):
    # Generic numpy in-place scalar addition for the non-uint8 dtypes.
    input_dtype = image.dtype
    is_single_value = (
        ia.is_single_number(value)
        or ia.is_np_scalar(value)
        or (ia.is_np_array(value) and value.size == 1))
    is_channelwise = not is_single_value
    nb_channels = 1 if image.ndim == 2 else image.shape[-1]
    # Reshape `value` so it broadcasts over (H,W) -- one entry per
    # channel in the channelwise case, a single entry otherwise.
    shape = (1, 1, nb_channels if is_channelwise else 1)
    value = np.array(value).reshape(shape)
    # We limit here the value range of the value parameter to the
    # bytes in the image's dtype. This prevents overflow problems
    # and makes it less likely that the image has to be up-casted,
    # which again improves performance and saves memory. Note that
    # this also enables more dtypes for image inputs.
    # The downside is that the mul parameter is limited in its
    # value range.
    #
    # We need 2* the itemsize of the image here to allow to shift
    # the image's max value to the lowest possible value, e.g. for
    # uint8 it must allow for -255 to 255.
    itemsize = image.dtype.itemsize * 2
    dtype_target = np.dtype("%s%d" % (value.dtype.kind, itemsize))
    value = iadt.clip_to_dtype_value_range_(
        value, dtype_target, validate=True)
    # Itemsize is currently reduced from 2 to 1 due to clip no
    # longer supporting int64, which can cause issues with int32
    # samples (32*2 = 64bit).
    # TODO limit value ranges of samples to int16/uint16 for
    # security
    image, value = iadt.promote_array_dtypes_(
        [image, value],
        dtypes=[image.dtype, dtype_target],
        increase_itemsize_factor=1)
    image = np.add(image, value, out=image, casting="no")
    # Cast back to the caller's dtype -- presumably clipping/rounding as
    # needed; see iadt.restore_dtypes_.
    return iadt.restore_dtypes_(image, input_dtype)
def add_elementwise(image, values):
    """Add an array of values to an image.

    This method ensures that ``uint8`` does not overflow during the addition.

    **Supported dtypes**:

    * ``uint8``: yes; fully tested
    * ``uint16``: limited; tested (1)
    * ``uint32``: no
    * ``uint64``: no
    * ``int8``: limited; tested (1)
    * ``int16``: limited; tested (1)
    * ``int32``: no
    * ``int64``: no
    * ``float16``: limited; tested (1)
    * ``float32``: limited; tested (1)
    * ``float64``: no
    * ``float128``: no
    * ``bool``: limited; tested (1)

    - (1) Non-uint8 dtypes can overflow. For floats, this can result
      in +/-inf.

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``.

    values : ndarray
        The values to add to the image. Expected to have the same height
        and width as `image` and either no channels or one channel or
        the same number of channels as `image`.
        This array is expected to have dtype ``int8``, ``int16``, ``int32``,
        ``uint8``, ``uint16``, ``float32``, ``float64``. Other dtypes may
        or may not work.
        For ``uint8`` inputs, only `value` arrays with values in the interval
        ``[-1000, 1000]`` are supported. Values beyond that interval may
        result in an output array of zeros (no error is raised due to
        performance reasons).

    Returns
    -------
    ndarray
        Image with values added to it.

    """
    # Refuse dtypes for which overflow-safe addition is not implemented.
    iadt.gate_dtypes_strs(
        {image.dtype},
        allowed="bool uint8 uint16 int8 int16 float16 float32",
        disallowed="uint32 uint64 int32 int64 float64 float128",
        augmenter=None)
    if image.dtype == iadt._UINT8_DTYPE:
        vdt = values.dtype
        # Value dtypes that the cv2-based backend can consume directly.
        valid_value_dtypes_cv2 = iadt._convert_dtype_strs_to_types(
            "int8 int16 int32 uint8 uint16 float32 float64"
        )
        ishape = image.shape
        # cv2 path is only taken for 2D images or 3D images with at
        # most 512 channels, and never for zero-sized arrays.
        is_image_valid_shape_cv2 = (
            (
                len(ishape) == 2
                or (len(ishape) == 3 and ishape[-1] <= 512)
            )
            and 0 not in ishape
        )
        use_cv2 = (
            is_image_valid_shape_cv2
            and vdt in valid_value_dtypes_cv2
        )
        if use_cv2:
            return _add_elementwise_cv2_to_uint8(image, values)
        # numpy fallback when cv2 cannot handle the input.
        return _add_elementwise_np_to_uint8(image, values)
    return _add_elementwise_np_to_non_uint8(image, values)
def _add_elementwise_cv2_to_uint8(image, values):
    # cv2-accelerated elementwise addition for uint8 images.
    ind, vnd = image.ndim, values.ndim
    # `values` may omit the channel axis ((H,W) for an (H,W,C) image)
    # or match the image's dimensionality exactly.
    valid_vnd = [ind] if ind == 2 else [ind-1, ind]
    assert vnd in valid_vnd, (
        "Expected values with any of %s dimensions, "
        "got %d dimensions (shape %s vs. image shape %s)." % (
            valid_vnd, vnd, values.shape, image.shape
        )
    )
    if vnd == ind - 1:
        values = values[:, :, np.newaxis]
    if values.shape[-1] == 1:
        # Single-channel values: broadcast over all image channels.
        values = np.broadcast_to(values, image.shape)
    # add does not seem to require normalization
    result = cv2.add(image, values, dtype=cv2.CV_8U)
    # Restore a size-1 channel axis if cv2 returned (H,W) for an
    # (H,W,1) input.
    if result.ndim == 2 and ind == 3:
        return result[:, :, np.newaxis]
    return result
def _add_elementwise_np_to_uint8(image, values):
# This special uint8 block is around 60-100% faster than the
# corresponding non-uint8 function further below (more speedup
# for smaller images).
#
# Also tested to instead compute min/max of image and value
# and then only convert image/value dtype if actually
# necessary, but that was like 20-30% slower, even for 224x224
# images.
#
if values.dtype.kind == "f":
values = np.round(values)
image = image.astype(np.int16)
values = np.clip(values, -255, 255).astype(np.int16)
image_aug = image + values
image_aug = np.clip(image_aug, 0, 255).astype(np.uint8)
return image_aug
def _add_elementwise_np_to_non_uint8(image, values):
    # Generic numpy in-place elementwise addition for non-uint8 dtypes.
    # We limit here the value range of the value parameter to the
    # bytes in the image's dtype. This prevents overflow problems
    # and makes it less likely that the image has to be up-casted,
    # which again improves performance and saves memory. Note that
    # this also enables more dtypes for image inputs.
    # The downside is that the mul parameter is limited in its
    # value range.
    #
    # We need 2* the itemsize of the image here to allow to shift
    # the image's max value to the lowest possible value, e.g. for
    # uint8 it must allow for -255 to 255.
    if image.dtype.kind != "f" and values.dtype.kind == "f":
        # Integer images cannot hold fractional additions; round first.
        values = np.round(values)
    input_shape = image.shape
    input_dtype = image.dtype
    # Work in (H,W,C) form; the added axis is dropped again at the end.
    if image.ndim == 2:
        image = image[..., np.newaxis]
    if values.ndim == 2:
        values = values[..., np.newaxis]
    nb_channels = image.shape[-1]
    itemsize = image.dtype.itemsize * 2
    dtype_target = np.dtype("%s%d" % (values.dtype.kind, itemsize))
    # validate=100: only a subset of values is checked, for performance
    # -- see iadt.clip_to_dtype_value_range_ for the exact semantics.
    values = iadt.clip_to_dtype_value_range_(values, dtype_target,
                                             validate=100)
    if values.shape[2] == 1:
        # Single-channel values: repeat over all image channels.
        values = np.tile(values, (1, 1, nb_channels))
    # Decreased itemsize from 2 to 1 here, see explanation in Add.
    image, values = iadt.promote_array_dtypes_(
        [image, values],
        dtypes=[image.dtype, dtype_target],
        increase_itemsize_factor=1)
    image = np.add(image, values, out=image, casting="no")
    image = iadt.restore_dtypes_(image, input_dtype)
    if len(input_shape) == 2:
        # Drop the channel axis added above.
        return image[..., 0]
    return image
def multiply_scalar(image, multiplier):
    """Multiply an image by a single scalar or one scalar per channel.

    Out-of-place variant of :func:`multiply_scalar_`; the input image is
    left untouched. ``uint8`` inputs are protected against overflow
    during the multiplication.

    .. note::

        Tests were only conducted for rather small multipliers, around
        ``-10.0`` to ``+10.0``. Multipliers are forced into a value
        range matching the image dtype's number of bytes to make
        overflows less likely.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.multiply_scalar_`.

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``. If `multiplier` contains
        more than one value, the shape is expected to be ``(H,W,C)``.

    multiplier : number or ndarray
        The multiplier to use. Either a single value or an array
        containing exactly one component per channel, i.e. ``C``
        components.

    Returns
    -------
    ndarray
        A copy of `image`, multiplied by `multiplier`.

    """
    copied = np.copy(image)
    return multiply_scalar_(copied, multiplier)
def multiply_scalar_(image, multiplier):
    """Multiply in-place an image by a single scalar or one scalar per channel.

    ``uint8`` inputs are protected against overflow during the
    multiplication.

    .. note::

        Tests were only conducted for rather small multipliers, around
        ``-10.0`` to ``+10.0``. Multipliers are forced into a value
        range matching the image dtype's number of bytes to make
        overflows less likely (see the non-uint8 backend).

    Added in 0.5.0.

    **Supported dtypes**:

    * ``uint8``: yes; fully tested
    * ``uint16``: limited; tested (1)
    * ``uint32``: no
    * ``uint64``: no
    * ``int8``: limited; tested (1)
    * ``int16``: limited; tested (1)
    * ``int32``: no
    * ``int64``: no
    * ``float16``: limited; tested (1)
    * ``float32``: limited; tested (1)
    * ``float64``: no
    * ``float128``: no
    * ``bool``: limited; tested (1)

    - (1) Non-uint8 dtypes can overflow. For floats, this can result in
      +/-inf.

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``. If `multiplier` contains
        more than one value, the shape is expected to be ``(H,W,C)``.
        May be changed in-place.

    multiplier : number or ndarray
        The multiplier to use. Either a single value or an array
        containing exactly one component per channel, i.e. ``C``
        components.

    Returns
    -------
    ndarray
        Image, multiplied by `multiplier`. Might be the input `image`,
        changed in-place.

    """
    if image.size == 0:
        return image
    # Refuse dtypes for which overflow-safe multiplication is not
    # implemented.
    iadt.gate_dtypes_strs(
        {image.dtype},
        allowed="bool uint8 uint16 int8 int16 float16 float32",
        disallowed="uint32 uint64 int32 int64 float64 float128",
        augmenter=None)
    if image.dtype != iadt._UINT8_DTYPE:
        return _multiply_scalar_to_non_uint8(image, multiplier)
    # For large uint8 images a 256-entry lookup table is cheaper than an
    # elementwise cv2 multiplication; 224*224*3 is the chosen cutoff.
    if image.size >= 224 * 224 * 3:
        return _multiply_scalar_to_uint8_lut_(image, multiplier)
    return _multiply_scalar_to_uint8_cv2_mul_(image, multiplier)
# TODO add a c++/cython method here to compute the LUT tables
# Added in 0.5.0.
def _multiply_scalar_to_uint8_lut_(image, multiplier):
    # LUT-based in-place multiplication for large uint8 images: build a
    # 256-entry table i -> clip(i * multiplier, 0, 255) (one column per
    # channel if channelwise) and apply it via ia.apply_lut_().
    is_single_value = (
        ia.is_single_number(multiplier)
        or ia.is_np_scalar(multiplier)
        or (ia.is_np_array(multiplier) and multiplier.size == 1))
    is_channelwise = not is_single_value
    nb_channels = 1 if image.ndim == 2 else image.shape[-1]
    multiplier = np.float32(multiplier)
    value_range = np.arange(0, 256, dtype=np.float32)
    if is_channelwise:
        assert multiplier.ndim == 1, (
            "Expected `multiplier` to be 1-dimensional, got %d-dimensional "
            "data with shape %s." % (multiplier.ndim, multiplier.shape))
        assert image.ndim == 3, (
            "Expected `image` to be 3-dimensional when multiplying by one "
            "value per channel, got %d-dimensional data with shape %s." % (
                image.ndim, image.shape))
        assert image.shape[-1] == multiplier.size, (
            "Expected number of channels in `image` and number of components "
            "in `multiplier` to be identical. Got %d vs. %d." % (
                image.shape[-1], multiplier.size))
        # (256, C) table: one LUT column per channel.
        value_range = np.broadcast_to(value_range[:, np.newaxis],
                                      (256, nb_channels))
        value_range = value_range * multiplier[np.newaxis, :]
    else:
        value_range = value_range * multiplier
    # astype() truncates the float products toward zero (no rounding).
    value_range = np.clip(value_range, 0, 255).astype(image.dtype)
    return ia.apply_lut_(image, value_range)
# Added in 0.5.0.
def _multiply_scalar_to_uint8_cv2_mul_(image, multiplier):
    """Multiply a small ``uint8`` image in-place via ``cv2.multiply``.

    Parameters
    ----------
    image : ndarray
        ``uint8`` image of shape ``(H,W,[C])``. Modified in-place.

    multiplier : number or ndarray
        Single multiplier or one multiplier per channel.

    Returns
    -------
    ndarray
        The multiplied image (same buffer as `image`).

    """
    # Bugfix: the public multiply_scalar() documents plain Python
    # numbers as valid multipliers, but the `.size` access below
    # requires an array-like, so scalar inputs used to raise
    # AttributeError on this (small-image) code path. Coerce them first.
    if not isinstance(multiplier, (np.ndarray, np.generic)):
        multiplier = np.float32(multiplier)
    if multiplier.size > 1:
        # One multiplier per channel: broadcast to the full image shape.
        multiplier = multiplier[np.newaxis, np.newaxis, :]
        multiplier = np.broadcast_to(multiplier, image.shape)
    else:
        multiplier = np.full(image.shape, multiplier, dtype=np.float32)
    image = _normalize_cv2_input_arr_(image)
    # cv2.multiply saturates the CV_8U result to [0, 255].
    result = cv2.multiply(
        image,
        multiplier,
        dtype=cv2.CV_8U,
        dst=image
    )
    return result
def _multiply_scalar_to_non_uint8(image, multiplier):
    # Generic numpy in-place scalar multiplication for non-uint8 dtypes.
    # TODO estimate via image min/max values whether a resolution
    # increase is necessary
    input_dtype = image.dtype
    is_single_value = (
        ia.is_single_number(multiplier)
        or ia.is_np_scalar(multiplier)
        or (ia.is_np_array(multiplier) and multiplier.size == 1))
    is_channelwise = not is_single_value
    nb_channels = 1 if image.ndim == 2 else image.shape[-1]
    # Shape the multiplier so it broadcasts over (H,W).
    shape = (1, 1, nb_channels if is_channelwise else 1)
    multiplier = np.array(multiplier).reshape(shape)
    # deactivated itemsize increase due to clip causing problems
    # with int64, see Add
    # mul_min = np.min(mul)
    # mul_max = np.max(mul)
    # is_not_increasing_value_range = (
    #     (-1 <= mul_min <= 1)
    #     and (-1 <= mul_max <= 1))
    # We limit here the value range of the mul parameter to the
    # bytes in the image's dtype. This prevents overflow problems
    # and makes it less likely that the image has to be up-casted,
    # which again improves performance and saves memory. Note that
    # this also enables more dtypes for image inputs.
    # The downside is that the mul parameter is limited in its
    # value range.
    itemsize = max(
        image.dtype.itemsize,
        2 if multiplier.dtype.kind == "f" else 1
    )  # float min itemsize is 2 not 1
    dtype_target = np.dtype("%s%d" % (multiplier.dtype.kind, itemsize))
    multiplier = iadt.clip_to_dtype_value_range_(
        multiplier, dtype_target, validate=True)
    image, multiplier = iadt.promote_array_dtypes_(
        [image, multiplier],
        dtypes=[image.dtype, dtype_target],
        # increase_itemsize_factor=(
        #     1 if is_not_increasing_value_range else 2)
        increase_itemsize_factor=1
    )
    image = np.multiply(image, multiplier, out=image, casting="no")
    # Cast back to the caller's dtype -- see iadt.restore_dtypes_.
    return iadt.restore_dtypes_(image, input_dtype)
def multiply_elementwise(image, multipliers):
    """Multiply an image with an array of values.

    Out-of-place variant of :func:`multiply_elementwise_`; the input
    image is left untouched. ``uint8`` inputs are protected against
    overflow during the multiplication.

    .. note::

        Tests were only conducted for rather small multipliers, around
        ``-10.0`` to ``+10.0``. Multipliers are forced into a value
        range matching the image dtype's number of bytes to make
        overflows less likely.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.multiply_elementwise_`.

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``.

    multipliers : ndarray
        The multipliers with which to multiply the image. Expected to
        have the same height and width as `image` and either no
        channels, one channel, or the same number of channels as
        `image`.

    Returns
    -------
    ndarray
        A copy of `image`, multiplied by `multipliers`.

    """
    duplicate = np.copy(image)
    return multiply_elementwise_(duplicate, multipliers)
def multiply_elementwise_(image, multipliers):
    """Multiply in-place an image with an array of values.

    ``uint8`` inputs are protected against overflow during the
    multiplication.

    .. note::

        Tests were only conducted for rather small multipliers, around
        ``-10.0`` to ``+10.0``. Multipliers are forced into a value
        range matching the image dtype's number of bytes to make
        overflows less likely.

    Added in 0.5.0.

    **Supported dtypes**:

    * ``uint8``: yes; fully tested
    * ``uint16``: limited; tested (1)
    * ``uint32``: no
    * ``uint64``: no
    * ``int8``: limited; tested (1)
    * ``int16``: limited; tested (1)
    * ``int32``: no
    * ``int64``: no
    * ``float16``: limited; tested (1)
    * ``float32``: limited; tested (1)
    * ``float64``: no
    * ``float128``: no
    * ``bool``: limited; tested (1)

    - (1) Non-uint8 dtypes can overflow. For floats, this can result
      in +/-inf.

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``. May be changed in-place.

    multipliers : ndarray
        The multipliers with which to multiply the image. Expected to
        have the same height and width as `image` and either no
        channels, one channel, or the same number of channels as
        `image`.

    Returns
    -------
    ndarray
        Image, multiplied by `multipliers`. Might be the input `image`,
        changed in-place.

    """
    # Refuse dtypes for which overflow-safe multiplication is not
    # implemented.
    iadt.gate_dtypes_strs(
        {image.dtype},
        allowed="bool uint8 uint16 int8 int16 float16 float32",
        disallowed="uint32 uint64 int32 int64 float64 float128",
        augmenter=None)
    if 0 in image.shape:
        return image
    if multipliers.dtype.kind == "b":
        # Boolean multipliers act as a keep/zero mask.
        # TODO extend this with some shape checks
        image *= multipliers
        return image
    handler = (
        _multiply_elementwise_to_uint8_
        if image.dtype == iadt._UINT8_DTYPE
        else _multiply_elementwise_to_non_uint8)
    return handler(image, multipliers)
# Added in 0.5.0.
def _multiply_elementwise_to_uint8_(image, multipliers):
    # cv2-based in-place elementwise multiplication for uint8 images.
    dt = multipliers.dtype
    kind = dt.kind
    # Normalize the multiplier dtype to one the cv2 call below is
    # given directly (float32 / int32 / uint8).
    if kind == "f" and dt != iadt._FLOAT32_DTYPE:
        multipliers = multipliers.astype(np.float32)
    elif kind == "i" and dt != iadt._INT32_DTYPE:
        multipliers = multipliers.astype(np.int32)
    elif kind == "u" and dt != iadt._UINT8_DTYPE:
        # NOTE(review): down-casting wider unsigned ints to uint8 wraps
        # values above 255 -- presumably callers only supply small
        # multipliers here; verify.
        multipliers = multipliers.astype(np.uint8)
    # Expand (H,W) multipliers to (H,W,1), then broadcast to (H,W,C).
    if multipliers.ndim < image.ndim:
        multipliers = multipliers[:, :, np.newaxis]
    if multipliers.shape != image.shape:
        multipliers = np.broadcast_to(multipliers, image.shape)
    assert image.shape == multipliers.shape, (
        "Expected multipliers to have shape (H,W) or (H,W,1) or (H,W,C) "
        "(H = image height, W = image width, C = image channels). Reached "
        "shape %s after broadcasting, compared to image shape %s." % (
            multipliers.shape, image.shape
        )
    )
    # views seem to be fine here
    if image.flags["C_CONTIGUOUS"] is False:
        # cv2 needs a contiguous buffer; this copy makes the operation
        # not strictly in-place for non-contiguous inputs.
        image = np.ascontiguousarray(image)
    result = cv2.multiply(image, multipliers, dst=image, dtype=cv2.CV_8U)
    return result
def _multiply_elementwise_to_non_uint8(image, multipliers):
    """Multiply a non-uint8 image with multipliers (in-place where possible).

    The multipliers are clipped to a dtype whose itemsize matches the
    image's dtype, then both arrays are promoted to a common dtype, the
    multiplication is performed and the original image dtype is restored.
    """
    input_dtype = image.dtype
    # TODO maybe introduce to stochastic parameters some way to
    #      get the possible min/max values, could make things
    #      faster for dropout to get 0/1 min/max from the binomial
    # itemsize decrease is currently deactivated due to issues
    # with clip and int64, see Add
    mul_min = np.min(multipliers)
    mul_max = np.max(multipliers)
    # is_not_increasing_value_range = (
    #     (-1 <= mul_min <= 1) and (-1 <= mul_max <= 1))
    # We limit here the value range of the mul parameter to the
    # bytes in the image's dtype. This prevents overflow problems
    # and makes it less likely that the image has to be up-casted,
    # which again improves performance and saves memory. Note that
    # this also enables more dtypes for image inputs.
    # The downside is that the mul parameter is limited in its
    # value range.
    itemsize = max(
        image.dtype.itemsize,
        2 if multipliers.dtype.kind == "f" else 1
    )  # float min itemsize is 2
    dtype_target = np.dtype("%s%d" % (multipliers.dtype.kind, itemsize))
    multipliers = iadt.clip_to_dtype_value_range_(
        multipliers, dtype_target,
        validate=True, validate_values=(mul_min, mul_max))
    # NOTE(review): this assumes `multipliers` is 3-dimensional at this
    # point -- presumably guaranteed by the callers; TODO confirm.
    if multipliers.shape[2] == 1:
        # TODO check if tile() is here actually needed
        nb_channels = image.shape[-1]
        multipliers = np.tile(multipliers, (1, 1, nb_channels))
    image, multipliers = iadt.promote_array_dtypes_(
        [image, multipliers],
        dtypes=[image, dtype_target],
        increase_itemsize_factor=1
        # increase_itemsize_factor=(
        #     1 if is_not_increasing_value_range else 2)
    )
    # casting="no" guarantees the in-place multiply never silently changes
    # the (already promoted) dtype.
    image = np.multiply(image, multipliers, out=image, casting="no")
    return iadt.restore_dtypes_(image, input_dtype)
def cutout(image, x1, y1, x2, y2,
           fill_mode="constant", cval=0, fill_per_channel=False,
           seed=None):
    """Fill a single rectangular area within a copy of an image.

    This is the out-of-place variant of
    :func:`~imgaug.augmenters.arithmetic.cutout_`: the input image is
    copied first and the copy is filled. The cutout region is given by its
    top-left and bottom-right corners in absolute pixel coordinates.

    .. note::

        Gaussian fill mode will assume that float input images contain
        values in the interval ``[0.0, 1.0]`` and hence sample values from
        a gaussian within that interval, i.e. from ``N(0.5, std=0.5/3)``.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.cutout_`.

    Added in 0.4.0.

    Parameters
    ----------
    image : ndarray
        Image to modify. A copy is created; the input stays untouched.

    x1 : number
        X-coordinate of the top-left corner of the cutout region.

    y1 : number
        Y-coordinate of the top-left corner of the cutout region.

    x2 : number
        X-coordinate of the bottom-right corner of the cutout region.

    y2 : number
        Y-coordinate of the bottom-right corner of the cutout region.

    fill_mode : {'constant', 'gaussian'}, optional
        See :func:`~imgaug.augmenters.arithmetic.cutout_`.

    cval : number or tuple of number, optional
        See :func:`~imgaug.augmenters.arithmetic.cutout_`.

    fill_per_channel : number or bool, optional
        See :func:`~imgaug.augmenters.arithmetic.cutout_`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.arithmetic.cutout_`.

    Returns
    -------
    ndarray
        New image with the area filled in.

    """
    image_copy = np.copy(image)
    return cutout_(image_copy, x1, y1, x2, y2,
                   fill_mode=fill_mode,
                   cval=cval,
                   fill_per_channel=fill_per_channel,
                   seed=seed)
def cutout_(image, x1, y1, x2, y2,
            fill_mode="constant", cval=0, fill_per_channel=False,
            seed=None):
    """Fill a single area within an image using a fill mode (in-place).

    This cutout method uses the top-left and bottom-right corner coordinates
    of the cutout region given as absolute pixel values.

    .. note::

        Gaussian fill mode will assume that float input images contain values
        in the interval ``[0.0, 1.0]`` and hence sample values from a
        gaussian within that interval, i.e. from ``N(0.5, std=0.5/3)``.

    Added in 0.4.0.

    **Supported dtypes**:

    minimum of (
        :func:`~imgaug.augmenters.arithmetic._fill_rectangle_gaussian_`,
        :func:`~imgaug.augmenters.arithmetic._fill_rectangle_constant_`
    )

    Parameters
    ----------
    image : ndarray
        Image to modify. Might be modified in-place.

    x1 : number
        X-coordinate of the top-left corner of the cutout region.

    y1 : number
        Y-coordinate of the top-left corner of the cutout region.

    x2 : number
        X-coordinate of the bottom-right corner of the cutout region.

    y2 : number
        Y-coordinate of the bottom-right corner of the cutout region.

    fill_mode : {'constant', 'gaussian'}, optional
        Fill mode to use.

    cval : number or tuple of number, optional
        The constant value to use when filling with mode ``constant``.
        May be an intensity value or color tuple.

    fill_per_channel : number or bool, optional
        Whether to fill in a channelwise fashion.
        If number then a value ``>=0.5`` will be interpreted as ``True``.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        A random number generator to sample random values from.
        Usually an integer seed value or an ``RNG`` instance.
        See :class:`imgaug.random.RNG` for details.

    Returns
    -------
    ndarray
        Image with area filled in.
        The input image might have been modified in-place.

    """
    import importlib
    # Clamp the cutout rectangle to the image bounds; coordinates may be
    # floats or lie (partially) outside the image.
    height, width = image.shape[0:2]
    x1 = min(max(int(x1), 0), width)
    y1 = min(max(int(y1), 0), height)
    x2 = min(max(int(x2), 0), width)
    y2 = min(max(int(y2), 0), height)
    # Only fill if the clamped region is non-empty.
    if x2 > x1 and y2 > y1:
        assert fill_mode in _CUTOUT_FILL_MODES, (
            "Expected one of the following fill modes: %s. "
            "Got: %s." % (
                str(list(_CUTOUT_FILL_MODES.keys())), fill_mode))
        # _CUTOUT_FILL_MODES maps mode name -> (module name, function name).
        # The fill function is resolved lazily via importlib -- presumably
        # to avoid import cycles; TODO confirm.
        module_name, fname = _CUTOUT_FILL_MODES[fill_mode]
        module = importlib.import_module(module_name)
        func = getattr(module, fname)
        image = func(
            image,
            x1=x1, y1=y1, x2=x2, y2=y2,
            cval=cval,
            # numbers >= 0.5 count as "fill channelwise"
            per_channel=(fill_per_channel >= 0.5),
            random_state=(
                iarandom.RNG(seed)
                if not isinstance(seed, iarandom.RNG)
                else seed)  # only RNG(.) without "if" is ~8x slower
        )
    return image
def _fill_rectangle_gaussian_(image, x1, y1, x2, y2, cval, per_channel,
random_state):
"""Fill a rectangular image area with samples from a gaussian.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: limited; tested (1)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: limited; tested (1)
* ``float16``: yes; tested (2)
* ``float32``: yes; tested (2)
* ``float64``: yes; tested (2)
* ``float128``: limited; tested (1) (2)
* ``bool``: yes; tested
- (1) Possible loss of resolution due to gaussian values being sampled
as ``float64`` s.
- (2) Float input arrays are assumed to be in interval ``[0.0, 1.0]``
and all gaussian samples are within that interval too.
"""
# for float we assume value range [0.0, 1.0]
# that matches the common use case and also makes the tests way easier
# we also set bool here manually as the center value returned by
# get_value_range_for_dtype() is None
kind = image.dtype.kind
if kind in ["f", "b"]:
min_value = 0.0
center_value = 0.5
max_value = 1.0
else:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(
image.dtype)
# set standard deviation to 1/3 of value range to get 99.7% of values
# within [min v.r., max v.r.]
# we also divide by 2 because we want to spread towards the
# "left"/"right" of the center value by half of the value range
stddev = (float(max_value) - float(min_value)) / 2.0 / 3.0
height = y2 - y1
width = x2 - x1
shape = (height, width)
if per_channel and image.ndim == 3:
shape = shape + (image.shape[2],)
rect = random_state.normal(center_value, stddev, size=shape)
if image.dtype.kind == "b":
rect_vr = (rect > 0.5)
else:
rect_vr = np.clip(rect, min_value, max_value).astype(image.dtype)
if image.ndim == 3:
image[y1:y2, x1:x2, :] = np.atleast_3d(rect_vr)
else:
image[y1:y2, x1:x2] = rect_vr
return image
def _fill_rectangle_constant_(image, x1, y1, x2, y2, cval, per_channel,
                              random_state):
    """Fill a rectangular image area with constant value(s) (in-place).

    `cval` may be a single value or one per channel. If the number of items
    in `cval` does not match the number of channels in `image`, it may
    be tiled up to the number of channels.

    Added in 0.4.0.

    **Supported dtypes**:

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested
        * ``uint64``: yes; tested
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested
        * ``int64``: yes; tested
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: yes; tested
        * ``bool``: yes; tested

    """
    if ia.is_iterable(cval):
        if not per_channel:
            # Channelwise fill disabled: only the first component is used.
            cval = cval[0]
        elif image.ndim == 2:
            # No channel axis to spread the values over.
            cval = cval[0]
        else:
            nb_channels = image.shape[-1]
            if len(cval) < nb_channels:
                # Tile cval until it covers all channels, then crop.
                repeats = int(np.ceil(nb_channels / len(cval)))
                cval = np.tile(cval, (repeats,))[0:nb_channels]
            elif len(cval) > nb_channels:
                cval = cval[0:nb_channels]
    # without the array(), uint64 max value is assigned as 0
    image[y1:y2, x1:x2, ...] = np.array(cval, dtype=image.dtype)
    return image
def replace_elementwise_(image, mask, replacements):
    """Replace components in an image array with new values.

    **Supported dtypes**:

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested
        * ``uint64``: no (1)
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested
        * ``int64``: no (2)
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: no
        * ``bool``: yes; tested

        - (1) ``uint64`` is currently not supported, because
              :func:`~imgaug.dtypes.clip_to_dtype_value_range_()` does not
              support it, which again is because numpy.clip() seems to not
              support it.
        - (2) `int64` is disallowed due to being converted to `float64`
              by :func:`numpy.clip` since 1.17 (possibly also before?).

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``. May be modified in-place.

    mask : ndarray
        Mask of shape ``(H,W,[C])`` denoting which components to replace.
        If ``C`` is provided, it must be ``1`` or match the ``C`` of `image`.
        May contain floats in the interval ``[0.0, 1.0]``; values ``>0.5``
        mark a component for replacement.

    replacements : iterable
        Replacements to place in `image` at the locations defined by `mask`.
        This 1-dimensional iterable must contain exactly as many values
        as there are replaced components in `image`.

    Returns
    -------
    ndarray
        Image with replaced components.

    """
    iadt.gate_dtypes_strs(
        {image.dtype},
        allowed="bool uint8 uint16 uint32 int8 int16 int32 float16 float32 "
                "float64",
        disallowed="uint64 int64 float128",
        augmenter=None
    )
    # This is slightly faster (~20%) for masks that are True at many
    # locations, but slower (~50%) for masks with few Trues, which is
    # probably the more common use-case:
    #
    # replacement_samples = self.replacement.draw_samples(
    #     sampling_shape, random_state=rs_replacement)
    #
    # # round, this makes 0.2 e.g. become 0 in case of boolean
    # # image (otherwise replacing values with 0.2 would
    # # lead to True instead of False).
    # if (image.dtype.kind in ["i", "u", "b"]
    #         and replacement_samples.dtype.kind == "f"):
    #     replacement_samples = np.round(replacement_samples)
    #
    # replacement_samples = iadt.clip_to_dtype_value_range_(
    #     replacement_samples, image.dtype, validate=False)
    # replacement_samples = replacement_samples.astype(
    #     image.dtype, copy=False)
    #
    # if sampling_shape[2] == 1:
    #     mask_samples = np.tile(mask_samples, (1, 1, nb_channels))
    #     replacement_samples = np.tile(
    #         replacement_samples, (1, 1, nb_channels))
    # mask_thresh = mask_samples > 0.5
    # image[mask_thresh] = replacement_samples[mask_thresh]
    # Normalize (H,W) inputs to (H,W,1) so masking below is uniform; the
    # original shape is restored before returning.
    input_shape = image.shape
    if image.ndim == 2:
        image = image[..., np.newaxis]
    if mask.ndim == 2:
        mask = mask[..., np.newaxis]
    # Float masks are thresholded at 0.5 to a boolean mask.
    mask_thresh = mask > 0.5
    if mask.shape[2] == 1:
        nb_channels = image.shape[-1]
        # TODO verify if tile() is here really necessary
        mask_thresh = np.tile(mask_thresh, (1, 1, nb_channels))
    # round, this makes 0.2 e.g. become 0 in case of boolean
    # image (otherwise replacing values with 0.2 would lead to True
    # instead of False).
    if image.dtype.kind in ["i", "u", "b"] and replacements.dtype.kind == "f":
        replacements = np.round(replacements)
    replacement_samples = iadt.clip_to_dtype_value_range_(
        replacements, image.dtype, validate=False)
    replacement_samples = replacement_samples.astype(image.dtype, copy=False)
    image[mask_thresh] = replacement_samples
    if len(input_shape) == 2:
        return image[..., 0]
    return image
def invert(image, min_value=None, max_value=None, threshold=None,
           invert_above_threshold=True):
    """Invert an array (out-of-place).

    This is the copying variant of
    :func:`~imgaug.augmenters.arithmetic.invert_`: the input image is
    copied first and the copy is inverted.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.invert_`.

    Parameters
    ----------
    image : ndarray
        See :func:`invert_`. Stays unmodified.

    min_value : None or number, optional
        See :func:`invert_`.

    max_value : None or number, optional
        See :func:`invert_`.

    threshold : None or number, optional
        See :func:`invert_`.

    invert_above_threshold : bool, optional
        See :func:`invert_`.

    Returns
    -------
    ndarray
        Inverted image.

    """
    image_copy = np.copy(image)
    return invert_(image_copy,
                   min_value=min_value,
                   max_value=max_value,
                   threshold=threshold,
                   invert_above_threshold=invert_above_threshold)
def invert_(image, min_value=None, max_value=None, threshold=None,
            invert_above_threshold=True):
    """Invert an array in-place.

    Added in 0.4.0.

    **Supported dtypes**:

    if (min_value=None and max_value=None):

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested
        * ``uint64``: yes; tested
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested
        * ``int64``: yes; tested
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: yes; tested
        * ``bool``: yes; tested

    if (min_value!=None or max_value!=None):

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested
        * ``uint64``: no (1)
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested
        * ``int64``: no (2)
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: no (2)
        * ``float128``: no (3)
        * ``bool``: no (4)

        - (1) Not allowed due to numpy's clip converting from ``uint64`` to
              ``float64``.
        - (2) Not allowed as int/float have to be increased in resolution
              when using min/max values.
        - (3) Not tested.
        - (4) Makes no sense when using min/max values.

    Parameters
    ----------
    image : ndarray
        Image array of shape ``(H,W,[C])``.
        The array *might* be modified in-place.

    min_value : None or number, optional
        Minimum of the value range of input images, e.g. ``0`` for ``uint8``
        images. If set to ``None``, the value will be automatically derived
        from the image's dtype.

    max_value : None or number, optional
        Maximum of the value range of input images, e.g. ``255`` for ``uint8``
        images. If set to ``None``, the value will be automatically derived
        from the image's dtype.

    threshold : None or number, optional
        A threshold to use in order to invert only numbers above or below
        the threshold. If ``None`` no thresholding will be used.

    invert_above_threshold : bool, optional
        If ``True``, only values ``>=threshold`` will be inverted.
        Otherwise, only values ``<threshold`` will be inverted.
        If `threshold` is ``None`` this parameter has no effect.

    Returns
    -------
    ndarray
        Inverted image. This *can* be the same array as input in `image`,
        modified in-place.

    """
    # Empty arrays: nothing to invert.
    if image.size == 0:
        return image
    # when no custom min/max are chosen, all bool, uint, int and float dtypes
    # should be invertable (float tested only up to 64bit)
    # when chosing custom min/max:
    # - bool makes no sense, not allowed
    # - int and float must be increased in resolution if custom min/max values
    #   are chosen, hence they are limited to 32 bit and below
    # - uint64 is converted by numpy's clip to float64, hence loss of accuracy
    # - float16 seems to not be perfectly accurate, but still ok-ish -- was
    #   off by 10 for center value of range (float 16 min, 16), where float
    #   16 min is around -65500
    allow_dtypes_custom_minmax = iadt._convert_dtype_strs_to_types(
        "uint8 uint16 uint32 int8 int16 int32 float16 float32"
    )
    # Derive defaults for min/max from the dtype's full value range.
    min_value_dt, _, max_value_dt = \
        iadt.get_value_range_of_dtype(image.dtype)
    min_value = (min_value_dt
                 if min_value is None else min_value)
    max_value = (max_value_dt
                 if max_value is None else max_value)
    assert min_value >= min_value_dt, (
        "Expected min_value to be above or equal to dtype's min "
        "value, got %s (vs. min possible %s for %s)" % (
            str(min_value), str(min_value_dt), image.dtype.name)
    )
    assert max_value <= max_value_dt, (
        "Expected max_value to be below or equal to dtype's max "
        "value, got %s (vs. max possible %s for %s)" % (
            str(max_value), str(max_value_dt), image.dtype.name)
    )
    assert min_value < max_value, (
        "Expected min_value to be below max_value, got %s "
        "and %s" % (
            str(min_value), str(max_value))
    )
    # Custom (non-full-range) min/max values are only supported for a
    # subset of dtypes, see comment block above.
    if min_value != min_value_dt or max_value != max_value_dt:
        # NOTE(review): if _convert_dtype_strs_to_types() returns dtype
        # objects rather than strings, the join below would raise TypeError
        # when this assertion fires -- confirm.
        assert image.dtype in allow_dtypes_custom_minmax, (
            "Can use custom min/max values only with the following "
            "dtypes: %s. Got: %s." % (
                ", ".join(allow_dtypes_custom_minmax), image.dtype.name))
    # uint8 has a dedicated fast path (cv2 subtraction or cached LUT),
    # including threshold support.
    if image.dtype == iadt._UINT8_DTYPE:
        return _invert_uint8_(image, min_value, max_value, threshold,
                              invert_above_threshold)
    # All other dtypes dispatch on the dtype kind.
    dtype_kind_to_invert_func = {
        "b": _invert_bool,
        "u": _invert_uint16_or_larger_,  # uint8 handled above
        "i": _invert_int_,
        "f": _invert_float
    }
    func = dtype_kind_to_invert_func[image.dtype.kind]
    if threshold is None:
        return func(image, min_value, max_value)
    # Thresholded invert: invert a copy of the full image, then copy only
    # the components on the selected side of the threshold back.
    arr_inv = func(np.copy(image), min_value, max_value)
    if invert_above_threshold:
        mask = (image >= threshold)
    else:
        mask = (image < threshold)
    image[mask] = arr_inv[mask]
    return image
def _invert_bool(arr, min_value, max_value):
assert min_value == 0 and max_value == 1, (
"min_value and max_value must be 0 and 1 for bool arrays. "
"Got %.4f and %.4f." % (min_value, max_value))
return ~arr
# Added in 0.4.0.
def _invert_uint8_(arr, min_value, max_value, threshold,
                   invert_above_threshold):
    """Invert a uint8 array, choosing the fastest applicable routine."""
    shape = arr.shape
    nb_channels = shape[-1] if len(shape) == 3 else 1
    # The cv2-based subtraction only covers the non-thresholded case with
    # min_value 0. The size-4 exclusion presumably works around cv2
    # misinterpreting 4-element inputs (e.g. as a scalar/color tuple) --
    # TODO confirm.
    valid_for_cv2 = (
        threshold is None
        and min_value == 0
        and len(shape) >= 2
        and shape[0]*shape[1]*nb_channels != 4
    )
    if valid_for_cv2:
        return _invert_uint8_subtract_(arr, max_value)
    # Fallback: lookup-table based inversion (handles thresholds and
    # custom value ranges).
    return _invert_uint8_lut_pregenerated_(
        arr, min_value, max_value, threshold, invert_above_threshold
    )
# Added in 0.5.0.
def _invert_uint8_lut_pregenerated_(arr, min_value, max_value, threshold,
                                    invert_above_threshold):
    """Invert a uint8 array via a (possibly cached) lookup table."""
    tables = _InvertTablesSingleton.get_instance()
    table = tables.get_table(
        min_value=min_value,
        max_value=max_value,
        threshold=threshold,
        invert_above_threshold=invert_above_threshold
    )
    return ia.apply_lut_(arr, table)
# Added in 0.5.0.
def _invert_uint8_subtract_(arr, max_value):
    """Invert a uint8 array as ``max_value - arr`` via cv2 (in-place)."""
    # seems to work with arr.base.shape[0] > 1
    # Single-row views are copied first -- presumably writing into such a
    # view via dst= is unsafe; TODO confirm exact cv2 limitation.
    if arr.base is not None and arr.base.shape[0] == 1:
        arr = np.copy(arr)
    if not arr.flags["C_CONTIGUOUS"]:
        arr = np.ascontiguousarray(arr)
    input_shape = arr.shape
    # Multi-channel arrays are flattened before the cv2 call and the
    # original shape is restored afterwards.
    if len(input_shape) > 2 and input_shape[-1] > 1:
        arr = arr.ravel()
    # This also supports a mask, which would help for thresholded invert, but
    # it seems that all non-masked components are set to zero in the output
    # array. Tackling this issue seems to rather require more time than just
    # using a LUT.
    arr = cv2.subtract(int(max_value), arr, dst=arr)
    if arr.shape != input_shape:
        return arr.reshape(input_shape)
    return arr
# Added in 0.4.0.
def _invert_uint16_or_larger_(arr, min_value, max_value):
min_max_is_vr = (min_value == 0
and max_value == np.iinfo(arr.dtype).max)
if min_max_is_vr:
return max_value - arr
return _invert_by_distance(
np.clip(arr, min_value, max_value),
min_value, max_value
)
# Added in 0.4.0.
def _invert_int_(arr, min_value, max_value):
# note that for int dtypes the max value is
# (-1) * min_value - 1
# e.g. -128 and 127 (min/max) for int8
# mapping example:
# [-4, -3, -2, -1, 0, 1, 2, 3]
# will be mapped to
# [ 3, 2, 1, 0, -1, -2, -3, -4]
# hence we can not simply compute the inverse as:
# after = (-1) * before
# but instead need
# after = (-1) * before - 1
# however, this exceeds the value range for the minimum value, e.g.
# for int8: -128 -> 128 -> 127, where 128 exceeds it. Hence, we must
# compute the inverse via a mask (extra step for the minimum)
# or we have to increase the resolution of the array. Here, a
# two-step approach is used.
if min_value == (-1) * max_value - 1:
arr_inv = np.copy(arr)
mask = (arr_inv == min_value)
# there is probably a one-liner here to do this, but
# ((-1) * (arr_inv * ~mask) - 1) + mask * max_value
# has the disadvantage of inverting min_value to max_value - 1
# while
# ((-1) * (arr_inv * ~mask) - 1) + mask * (max_value+1)
# ((-1) * (arr_inv * ~mask) - 1) + mask * max_value + mask
# both sometimes increase the dtype resolution (e.g. int32 to int64)
arr_inv[mask] = max_value
arr_inv[~mask] = (-1) * arr_inv[~mask] - 1
return arr_inv
return _invert_by_distance(
np.clip(arr, min_value, max_value),
min_value, max_value
)
def _invert_float(arr, min_value, max_value):
if np.isclose(max_value, (-1)*min_value, rtol=0):
return (-1) * arr
return _invert_by_distance(
np.clip(arr, min_value, max_value),
min_value, max_value
)
def _invert_by_distance(arr, min_value, max_value):
arr_inv = arr
if arr.dtype.kind in ["i", "f"]:
arr_inv = iadt.increase_array_resolutions_([np.copy(arr)], 2)[0]
distance_from_min = np.abs(arr_inv - min_value) # d=abs(v-min)
arr_inv = max_value - distance_from_min # v'=MAX-d
# due to floating point inaccuracies, we might exceed the min/max
# values for floats here, hence clip this happens especially for
# values close to the float dtype's maxima
if arr.dtype.kind == "f":
arr_inv = np.clip(arr_inv, min_value, max_value)
if arr.dtype.kind in ["i", "f"]:
arr_inv = iadt.restore_dtypes_(
arr_inv, arr.dtype, clip=False)
return arr_inv
# Added in 0.4.0.
def _generate_table_for_invert_uint8(min_value, max_value, threshold,
invert_above_threshold):
table = np.arange(256).astype(np.int32)
full_value_range = (min_value == 0 and max_value == 255)
if full_value_range:
table_inv = table[::-1]
else:
distance_from_min = np.abs(table - min_value)
table_inv = max_value - distance_from_min
table_inv = np.clip(table_inv, min_value, max_value).astype(np.uint8)
if threshold is not None:
table = table.astype(np.uint8)
if invert_above_threshold:
table_inv = np.concatenate([
table[0:int(threshold)],
table_inv[int(threshold):]
], axis=0)
else:
table_inv = np.concatenate([
table_inv[0:int(threshold)],
table[int(threshold):]
], axis=0)
return table_inv
# Added in 0.5.0.
class _InvertTables(object):
    """Cache for uint8 inversion lookup tables.

    Only tables for the full uint8 value range (``0`` to ``255``) are
    cached, as that is the overwhelmingly common case; other ranges are
    generated on every request.
    """
    # Added in 0.5.0.
    def __init__(self):
        # Maps (threshold, invert_above_threshold) -> cached LUT.
        self.tables = {}
    # Added in 0.5.0.
    def get_table(self, min_value, max_value, threshold,
                  invert_above_threshold):
        """Return a (possibly cached) LUT for the given parameters."""
        is_full_range = (min_value == 0 and max_value == 255)
        if not is_full_range:
            return _generate_table_for_invert_uint8(
                min_value, max_value, threshold, invert_above_threshold
            )
        key = (threshold, invert_above_threshold)
        if key not in self.tables:
            self.tables[key] = _generate_table_for_invert_uint8(
                min_value, max_value, threshold, invert_above_threshold
            )
        return self.tables[key]
# Added in 0.5.0.
class _InvertTablesSingleton(object):
    """Lazily created, process-wide :class:`_InvertTables` instance."""
    _INSTANCE = None
    # Added in 0.5.0.
    @classmethod
    def get_instance(cls):
        """Return the global ``_InvertTables``, creating it on first use."""
        instance = cls._INSTANCE
        if instance is None:
            instance = _InvertTables()
            cls._INSTANCE = instance
        return instance
def solarize(image, threshold=128):
    """Invert pixel values above a threshold (out-of-place).

    This is the copying variant of
    :func:`~imgaug.augmenters.arithmetic.solarize_`.

    Added in 0.4.0.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.solarize_`.

    Parameters
    ----------
    image : ndarray
        See :func:`solarize_`. Stays unmodified.

    threshold : None or number, optional
        See :func:`solarize_`.

    Returns
    -------
    ndarray
        Inverted image.

    """
    image_copy = np.copy(image)
    return solarize_(image_copy, threshold=threshold)
def solarize_(image, threshold=128):
    """Invert pixel values above a threshold in-place.

    This is a thin wrapper around
    :func:`~imgaug.augmenters.arithmetic.invert_` that performs the same
    transformation as :func:`PIL.ImageOps.solarize`.

    Added in 0.4.0.

    **Supported dtypes**:

    See ``~imgaug.augmenters.arithmetic.invert_(min_value=None and max_value=None)``.

    Parameters
    ----------
    image : ndarray
        See :func:`invert_`.

    threshold : None or number, optional
        See :func:`invert_`.
        Note: The default threshold is optimized for ``uint8`` images.

    Returns
    -------
    ndarray
        Inverted image. This *can* be the same array as input in `image`,
        modified in-place.

    """
    return invert_(image, threshold=threshold)
def compress_jpeg(image, compression):
    """Compress an image using jpeg compression.

    **Supported dtypes**:

        * ``uint8``: yes; fully tested
        * ``uint16``: ?
        * ``uint32``: ?
        * ``uint64``: ?
        * ``int8``: ?
        * ``int16``: ?
        * ``int32``: ?
        * ``int64``: ?
        * ``float16``: ?
        * ``float32``: ?
        * ``float64``: ?
        * ``float128``: ?
        * ``bool``: ?

    Parameters
    ----------
    image : ndarray
        Image of dtype ``uint8`` and shape ``(H,W,[C])``. If ``C`` is provided,
        it must be ``1`` or ``3``.

    compression : int
        Strength of the compression in the interval ``[0, 100]``.

    Returns
    -------
    ndarray
        Input image after applying jpeg compression to it and reloading
        the result into a new array. Same shape and dtype as the input.

    """
    import PIL.Image
    # Empty images cannot be encoded; return an unchanged copy.
    if image.size == 0:
        return np.copy(image)
    # The value range 1 to 95 is suggested by PIL's save() documentation
    # Values above 95 seem to not make sense (no improvement in visual
    # quality, but large file size).
    # A value of 100 would mostly deactivate jpeg compression.
    # A value of 0 would lead to no compression (instead of maximum
    # compression).
    # We use range 1 to 100 here, because this augmenter is about
    # generating images for training and not for saving, hence we do not
    # care about large file sizes.
    maximum_quality = 100
    minimum_quality = 1
    iadt.allow_only_uint8({image.dtype})
    assert 0 <= compression <= 100, (
        "Expected compression to be in the interval [0, 100], "
        "got %.4f." % (compression,))
    # Single-channel (H,W,1) images are squeezed to (H,W) so PIL treats
    # them as grayscale; the channel axis is restored before returning.
    has_no_channels = (image.ndim == 2)
    is_single_channel = (image.ndim == 3 and image.shape[-1] == 1)
    if is_single_channel:
        image = image[..., 0]
    assert has_no_channels or is_single_channel or image.shape[-1] == 3, (
        "Expected either a grayscale image of shape (H,W) or (H,W,1) or an "
        "RGB image of shape (H,W,3). Got shape %s." % (image.shape,))
    # Map from compression to quality used by PIL
    # We have valid compressions from 0 to 100, i.e. 101 possible
    # values
    quality = int(
        np.clip(
            np.round(
                minimum_quality
                + (maximum_quality - minimum_quality)
                * (1.0 - (compression / 101))
            ),
            minimum_quality,
            maximum_quality
        )
    )
    # Round-trip the image through an actual on-disk jpeg file to apply
    # real encoder artifacts.
    image_pil = PIL.Image.fromarray(image)
    with tempfile.NamedTemporaryFile(mode="wb+", suffix=".jpg") as f:
        image_pil.save(f, quality=quality)
        # Read back from file.
        # We dont read from f.name, because that leads to PermissionDenied
        # errors on Windows. We add f.seek(0) here, because otherwise we get
        # `SyntaxError: index out of range` in PIL.
        f.seek(0)
        pilmode = "RGB"
        if has_no_channels or is_single_channel:
            pilmode = "L"
        image = imageio.imread(f, pilmode=pilmode, format="jpeg")
    if is_single_channel:
        image = image[..., np.newaxis]
    return image
class Add(meta.Augmenter):
"""
Add a value to all pixels in an image.
**Supported dtypes**:
See :func:`~imgaug.augmenters.arithmetic.add_scalar`.
Parameters
----------
value : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Value to add to all pixels.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, then a value from the discrete
interval ``[a..b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then a value will be sampled per
image from that parameter.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Add(10)
Always adds a value of 10 to all channels of all pixels of all input
images.
>>> aug = iaa.Add((-10, 10))
Adds a value from the discrete interval ``[-10..10]`` to all pixels of
input images. The exact value is sampled per image.
>>> aug = iaa.Add((-10, 10), per_channel=True)
Adds a value from the discrete interval ``[-10..10]`` to all pixels of
input images. The exact value is sampled per image *and* channel,
i.e. to a red-channel it might add 5 while subtracting 7 from the
blue channel of the same image.
>>> aug = iaa.Add((-10, 10), per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for 50 percent of all images.
"""
def __init__(self, value=(-20, 20), per_channel=False,
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(Add, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.value = iap.handle_continuous_param(
value, "value", value_range=None, tuple_to_uniform=True,
list_to_choice=True, prefetch=True)
self.per_channel = iap.handle_probability_param(
per_channel, "per_channel")
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
nb_channels_max = meta.estimate_max_number_of_channels(images)
rss = random_state.duplicate(2)
per_channel_samples = self.per_channel.draw_samples(
(nb_images,), random_state=rss[0])
value_samples = self.value.draw_samples(
(nb_images, nb_channels_max), random_state=rss[1])
gen = enumerate(zip(images, value_samples, per_channel_samples))
for i, (image, value_samples_i, per_channel_samples_i) in gen:
nb_channels = image.shape[2]
# Example code to directly add images via image+sample (uint8 only)
# if per_channel_samples_i > 0.5:
# result = []
# image = image.astype(np.int16)
# value_samples_i = value_samples_i.astype(np.int16)
# for c, value in enumerate(value_samples_i[0:nb_channels]):
# result.append(
# np.clip(
# image[..., c:c+1] + value, 0, 255
# ).astype(np.uint8))
# images[i] = np.concatenate(result, axis=2)
# else:
# images[i] = np.clip(
# image.astype(np.int16)
# + value_samples_i[0].astype(np.int16),
# 0, 255
# ).astype(np.uint8)
if per_channel_samples_i > 0.5:
value = value_samples_i[0:nb_channels]
else:
# the if/else here catches the case of the channel axis being 0
value = value_samples_i[0] if value_samples_i.size > 0 else []
batch.images[i] = add_scalar_(image, value)
return batch
def get_parameters(self):
    """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
    # order matters for callers that index into the returned list
    params = [self.value, self.per_channel]
    return params
# TODO merge this with Add
class AddElementwise(meta.Augmenter):
    """
    Add values to images that are sampled pixelwise at random.

    In contrast to ``Add``, which samples a single value per image (and
    optionally per channel), this augmenter draws an individual value for
    every image and *pixel* (and optionally per channel), so the
    intensities of neighbouring pixels may be shifted by different
    amounts.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.add_elementwise`.

    Parameters
    ----------
    value : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Value to add to the pixels.

        * If an ``int``, exactly that value is always used.
        * If a tuple ``(a, b)``, a value from the discrete interval
          ``[a..b]`` is sampled per image and pixel.
        * If a list of integers, a random list element is sampled per
          image and pixel.
        * If a ``StochasticParameter``, values are sampled from that
          parameter per image and pixel.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all channels
        (``False``) or to sample value(s) for each channel (``True``).
        ``True`` therefore leads to different transformations per image
        *and* channel, otherwise only per image.
        A float ``p`` means that `per_channel` is treated as ``True`` for
        ``p`` percent of all images.
        A ``StochasticParameter`` is expected to produce samples in
        ``[0.0, 1.0]``, where values ``>0.5`` enable per-channel
        behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.AddElementwise(10)

    Always adds a value of 10 to all channels of all pixels of all input
    images.

    >>> aug = iaa.AddElementwise((-10, 10))

    Samples per image and pixel a value from the discrete interval
    ``[-10..10]`` and adds it to the respective pixel.

    >>> aug = iaa.AddElementwise((-10, 10), per_channel=True)

    Samples per image, pixel *and channel* a value from the discrete
    interval ``[-10..10]`` and adds it to the respective pixel's channel
    value. Added values may hence differ between channels of one pixel.

    >>> aug = iaa.AddElementwise((-10, 10), per_channel=0.5)

    Identical to the previous example, but the `per_channel` feature is
    only active for 50 percent of all images.

    """

    def __init__(self, value=(-20, 20), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        """Create a new ``AddElementwise`` instance."""
        super(AddElementwise, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)

        # Parameter producing the per-pixel values to add.
        self.value = iap.handle_continuous_param(
            value, "value",
            value_range=None,
            tuple_to_uniform=True,
            list_to_choice=True)
        # Probability of sampling per channel instead of per image.
        self.per_channel = iap.handle_probability_param(
            per_channel, "per_channel")

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        """Add pixelwise-sampled values to all images of the batch."""
        if batch.images is None:
            return batch

        images = batch.images
        nb_images = len(images)

        # One child RNG for the per_channel flags, plus one per image for
        # the elementwise value maps.
        rngs = random_state.duplicate(1 + nb_images)
        per_channel_flags = self.per_channel.draw_samples(
            (nb_images,), random_state=rngs[0])

        for i, image in enumerate(images):
            height, width, nb_channels = image.shape
            use_per_channel = per_channel_flags[i] > 0.5
            # Sample one value per pixel; a single channel suffices when
            # not in per-channel mode (broadcast over all channels).
            sample_shape = (height,
                            width,
                            nb_channels if use_per_channel else 1)
            values = self.value.draw_samples(
                sample_shape, random_state=rngs[1 + i])
            batch.images[i] = add_elementwise(image, values)
        return batch

    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.value, self.per_channel]
# TODO rename to AddGaussianNoise?
# TODO examples say that iaa.AdditiveGaussianNoise(scale=(0, 0.1*255)) samples
# the scale from the uniform dist. per image, but is that still the case?
# AddElementwise seems to now sample once for all images, which should
# lead to a single scale value.
class AdditiveGaussianNoise(AddElementwise):
    """
    Add gaussian-distributed noise elementwise to images.

    The noise is sampled and added per pixel, i.e. neighbouring pixels
    may receive different noise values; the behaviour is comparable to
    ``AddElementwise``.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.AddElementwise`.

    Parameters
    ----------
    loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Mean of the normal distribution from which the noise is sampled.

        * If a number, exactly that value is always used.
        * If a tuple ``(a, b)``, a random value from the interval
          ``[a, b]`` is sampled per image.
        * If a list, a random list element is sampled per image.
        * If a ``StochasticParameter``, a value is sampled from that
          parameter per image.

    scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Standard deviation of the normal distribution that generates the
        noise. Must be ``>=0``. If ``0``, then `loc` is simply added to
        all pixels. Accepted datatypes are the same as for `loc`.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all channels
        (``False``) or to sample value(s) for each channel (``True``).
        A float ``p`` means that `per_channel` is treated as ``True`` for
        ``p`` percent of all images. A ``StochasticParameter`` is
        expected to produce samples in ``[0.0, 1.0]``, where values
        ``>0.5`` enable per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)

    Adds gaussian noise from the distribution ``N(0, 0.1*255)`` to
    images, drawn per image and pixel.

    >>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))

    Adds gaussian noise from ``N(0, s)``, where ``s`` is sampled per
    image from the interval ``[0, 0.1*255]``.

    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)

    Adds gaussian noise from ``N(0, 0.1*255)``, with noise values that
    differ per image, pixel *and* channel (e.g. different values for the
    red, green and blue channels of one pixel), leading to "colorful"
    noise.

    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)

    Identical to the previous example, but the `per_channel` feature is
    only active for 50 percent of all images.

    """

    def __init__(self, loc=0, scale=(0, 15), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        """Create a new ``AdditiveGaussianNoise`` instance."""
        # Normalize loc/scale to stochastic parameters and combine them
        # into a single normal-distribution parameter used as the
        # elementwise value source.
        loc_param = iap.handle_continuous_param(
            loc, "loc", value_range=None, tuple_to_uniform=True,
            list_to_choice=True)
        scale_param = iap.handle_continuous_param(
            scale, "scale", value_range=(0, None), tuple_to_uniform=True,
            list_to_choice=True)
        noise = iap.Normal(loc=loc_param, scale=scale_param)

        super(AdditiveGaussianNoise, self).__init__(
            noise, per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
# TODO add tests
# TODO rename to AddLaplaceNoise?
class AdditiveLaplaceNoise(AddElementwise):
    """
    Add laplace-distributed noise elementwise to images.

    The laplace distribution resembles the gaussian distribution, but
    puts more weight on the long tail. This noise therefore produces
    more outliers (very high/low values) and sits somewhere between
    gaussian noise and salt-and-pepper noise.

    For ``uint8`` images, `scale` values around ``255 * 0.05`` lead to
    visible noise and values around ``255 * 0.10`` to very visible
    noise. It is recommended to usually set `per_channel` to ``True``.

    The noise is sampled and added per pixel, i.e. neighbouring pixels
    may receive different noise values; the behaviour is comparable to
    ``AddElementwise``.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.AddElementwise`.

    Parameters
    ----------
    loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Mean of the laplace distribution that generates the noise.

        * If a number, exactly that value is always used.
        * If a tuple ``(a, b)``, a random value from the interval
          ``[a, b]`` is sampled per image.
        * If a list, a random list element is sampled per image.
        * If a ``StochasticParameter``, a value is sampled from that
          parameter per image.

    scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Standard deviation of the laplace distribution that generates
        the noise. Must be ``>=0``; if ``0``, only `loc` is used.
        Recommended to be around ``255*0.05``. Accepted datatypes are
        the same as for `loc`.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all channels
        (``False``) or to sample value(s) for each channel (``True``).
        A float ``p`` means that `per_channel` is treated as ``True``
        for ``p`` percent of all images. A ``StochasticParameter`` is
        expected to produce samples in ``[0.0, 1.0]``, where values
        ``>0.5`` enable per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255)

    Adds laplace noise from the distribution ``Laplace(0, 0.1*255)`` to
    images, drawn per image and pixel.

    >>> aug = iaa.AdditiveLaplaceNoise(scale=(0, 0.1*255))

    Adds laplace noise from ``Laplace(0, s)``, where ``s`` is sampled
    per image from the interval ``[0, 0.1*255]``.

    >>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255, per_channel=True)

    Adds laplace noise from ``Laplace(0, 0.1*255)``, with noise values
    that differ per image, pixel *and* channel (e.g. different values
    for the red, green and blue channels of one pixel), leading to
    "colorful" noise.

    >>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255, per_channel=0.5)

    Identical to the previous example, but the `per_channel` feature is
    only active for 50 percent of all images.

    """

    def __init__(self, loc=0, scale=(0, 15), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        """Create a new ``AdditiveLaplaceNoise`` instance."""
        # Normalize loc/scale and wrap them in a laplace-distribution
        # parameter that acts as the elementwise value source.
        loc_param = iap.handle_continuous_param(
            loc, "loc", value_range=None, tuple_to_uniform=True,
            list_to_choice=True)
        scale_param = iap.handle_continuous_param(
            scale, "scale", value_range=(0, None), tuple_to_uniform=True,
            list_to_choice=True)
        noise = iap.Laplace(loc=loc_param, scale=scale_param)

        super(AdditiveLaplaceNoise, self).__init__(
            noise,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
# TODO add tests
# TODO rename to AddPoissonNoise?
class AdditivePoissonNoise(AddElementwise):
    """
    Add poisson-distributed noise elementwise to images.

    Poisson noise is comparable to gaussian noise as e.g. generated via
    ``AdditiveGaussianNoise``. Since poisson distributions only produce
    positive numbers, the signs of the sampled values are randomly
    flipped here.

    For ``uint8`` images, `lam` values around ``10.0`` lead to visible
    noise and values around ``20.0`` to very visible noise. It is
    recommended to usually set `per_channel` to ``True``.

    The noise is sampled and added per pixel, i.e. neighbouring pixels
    may receive different noise values; the behaviour is comparable to
    ``AddElementwise``.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.AddElementwise`.

    Parameters
    ----------
    lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Lambda parameter of the poisson distribution. Must be ``>=0``.
        Recommended values are around ``0.0`` to ``10.0``.

        * If a number, exactly that value is always used.
        * If a tuple ``(a, b)``, a random value from the interval
          ``[a, b]`` is sampled per image.
        * If a list, a random list element is sampled per image.
        * If a ``StochasticParameter``, a value is sampled from that
          parameter per image.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all channels
        (``False``) or to sample value(s) for each channel (``True``).
        A float ``p`` means that `per_channel` is treated as ``True``
        for ``p`` percent of all images. A ``StochasticParameter`` is
        expected to produce samples in ``[0.0, 1.0]``, where values
        ``>0.5`` enable per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.AdditivePoissonNoise(lam=5.0)

    Adds noise sampled from a poisson distribution with a ``lambda``
    parameter of ``5.0``, drawn per image and pixel.

    >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 15.0))

    Adds noise sampled from ``Poisson(x)``, where ``x`` is sampled per
    image from the interval ``[0.0, 15.0]``.

    >>> aug = iaa.AdditivePoissonNoise(lam=5.0, per_channel=True)

    Adds noise sampled from ``Poisson(5.0)``, with values that differ
    per image, pixel *and* channel (e.g. different values for the red,
    green and blue channels of one pixel).

    >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 15.0), per_channel=True)

    Adds noise sampled from ``Poisson(x)``, with ``x`` sampled from
    ``uniform(0.0, 15.0)`` per image and channel. This is the
    *recommended* configuration.

    >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 15.0), per_channel=0.5)

    Identical to the previous example, but the `per_channel` feature is
    only active for 50 percent of all images.

    """

    def __init__(self, lam=(0.0, 15.0), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        """Create a new ``AdditivePoissonNoise`` instance."""
        # Lambda of the poisson distribution; must be >= 0.
        lam_param = iap.handle_continuous_param(
            lam, "lam",
            value_range=(0, None), tuple_to_uniform=True,
            list_to_choice=True)
        # Poisson samples are positive-only, so randomly flip their sign.
        noise = iap.RandomSign(iap.Poisson(lam=lam_param))

        super(AdditivePoissonNoise, self).__init__(
            noise,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class Multiply(meta.Augmenter):
    """
    Multiply all pixels in an image with a value sampled once per image.

    This augmenter can be used to make images lighter or darker.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.multiply_scalar`.

    Parameters
    ----------
    mul : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        The value with which to multiply the pixel values in each image.

        * If a number, that value is always used.
        * If a tuple ``(a, b)``, a value from the interval ``[a, b]``
          is sampled per image and used for all pixels.
        * If a list, a random list element is sampled per image.
        * If a ``StochasticParameter``, a new value is sampled from that
          parameter per image.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all channels
        (``False``) or to sample value(s) for each channel (``True``).
        ``True`` therefore leads to different transformations per image
        *and* channel, otherwise only per image.
        A float ``p`` means that `per_channel` is treated as ``True``
        for ``p`` percent of all images. A ``StochasticParameter`` is
        expected to produce samples in ``[0.0, 1.0]``, where values
        ``>0.5`` enable per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Multiply(2.0)

    Multiplies all images by a factor of ``2``, making the images
    significantly brighter.

    >>> aug = iaa.Multiply((0.5, 1.5))

    Multiplies images by a random value sampled uniformly from the
    interval ``[0.5, 1.5]``, making some images darker and others
    brighter.

    >>> aug = iaa.Multiply((0.5, 1.5), per_channel=True)

    Identical to the previous example, but the sampled multipliers
    differ by image *and* channel, instead of only by image.

    >>> aug = iaa.Multiply((0.5, 1.5), per_channel=0.5)

    Identical to the previous example, but the `per_channel` feature is
    only active for 50 percent of all images.

    """

    def __init__(self, mul=(0.8, 1.2), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        """Create a new ``Multiply`` instance."""
        super(Multiply, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)

        # Parameter producing the imagewise multipliers.
        self.mul = iap.handle_continuous_param(
            mul, "mul",
            value_range=None,
            tuple_to_uniform=True,
            list_to_choice=True)
        # Probability of sampling one multiplier per channel.
        self.per_channel = iap.handle_probability_param(
            per_channel, "per_channel")

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        """Multiply all images of the batch by sampled factors."""
        if batch.images is None:
            return batch

        images = batch.images
        nb_images = len(images)
        nb_channels_max = meta.estimate_max_number_of_channels(images)

        # One child RNG for the per_channel coin flips, one for the
        # multipliers.
        rng_per_channel, rng_mul = random_state.duplicate(2)
        per_channel_flags = self.per_channel.draw_samples(
            (nb_images,), random_state=rng_per_channel)
        # Sample up-front one multiplier per image and potential channel.
        all_muls = self.mul.draw_samples(
            (nb_images, nb_channels_max), random_state=rng_mul)

        for i, image in enumerate(images):
            nb_channels = image.shape[2]
            if per_channel_flags[i] > 0.5:
                # per-channel mode: one multiplier per actual channel
                mul = all_muls[i, 0:nb_channels]
            else:
                # imagewise mode: first multiplier for all channels; the
                # conditional guards against a zero-sized channel axis
                mul = all_muls[i, 0] if all_muls[i].size > 0 else []
            batch.images[i] = multiply_scalar_(image, mul)
        return batch

    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.mul, self.per_channel]
# TODO merge with Multiply
class MultiplyElementwise(meta.Augmenter):
    """
    Multiply image pixels with values that are pixelwise randomly sampled.

    While the ``Multiply`` Augmenter uses a constant multiplier *per
    image* (and optionally channel), this augmenter samples the multipliers
    to use per image and *per pixel* (and optionally per channel).

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.multiply_elementwise`.

    Parameters
    ----------
    mul : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        The value with which to multiply pixel values in the image.

        * If a number, then that value will always be used.
        * If a tuple ``(a, b)``, then a value from the interval ``[a, b]``
          will be sampled per image and pixel.
        * If a list, then a random value will be sampled from that list
          per image and pixel.
        * If a ``StochasticParameter``, then that parameter will be used to
          sample a new value per image and pixel.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.MultiplyElementwise(2.0)

    Multiply all images by a factor of ``2.0``, making them significantly
    brighter.

    >>> aug = iaa.MultiplyElementwise((0.5, 1.5))

    Samples per image and pixel uniformly a value from the interval
    ``[0.5, 1.5]`` and multiplies the pixel with that value.

    >>> aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True)

    Samples per image and pixel *and channel* uniformly a value from the
    interval ``[0.5, 1.5]`` and multiplies the pixel with that value.
    Therefore, used multipliers may differ between channels of the same
    pixel.

    >>> aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=0.5)

    Identical to the previous example, but the `per_channel` feature is only
    active for 50 percent of all images.

    """

    def __init__(self, mul=(0.8, 1.2), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        """Create a new ``MultiplyElementwise`` instance."""
        super(MultiplyElementwise, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)

        # Parameter producing the per-pixel multipliers.
        self.mul = iap.handle_continuous_param(
            mul, "mul",
            value_range=None, tuple_to_uniform=True, list_to_choice=True)
        # Probability of sampling multipliers per channel.
        self.per_channel = iap.handle_probability_param(per_channel,
                                                        "per_channel")

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        # No images in this batch -> nothing to do.
        if batch.images is None:
            return batch

        images = batch.images
        nb_images = len(images)
        # One child RNG for the per_channel flags, plus one per image for
        # the elementwise multiplier maps.
        rss = random_state.duplicate(1+nb_images)
        per_channel_samples = self.per_channel.draw_samples(
            (nb_images,), random_state=rss[0])

        # Detect whether `mul` is (or wraps) a Binomial parameter, whose
        # samples can then be converted to a boolean mask below
        # (performance optimization used by Dropout/CoarseDropout).
        is_mul_binomial = isinstance(self.mul, iap.Binomial) or (
            isinstance(self.mul, iap.FromLowerResolution)
            and isinstance(self.mul.other_param, iap.Binomial)
        )

        gen = enumerate(zip(images, per_channel_samples, rss[1:]))
        for i, (image, per_channel_samples_i, rs) in gen:
            height, width, nb_channels = image.shape
            # One multiplier per pixel; a single channel suffices when not
            # in per-channel mode (broadcast over all channels).
            sample_shape = (height,
                            width,
                            nb_channels if per_channel_samples_i > 0.5 else 1)
            mul = self.mul.draw_samples(sample_shape, random_state=rs)
            # TODO let Binomial return boolean mask directly instead of
            #      [0, 1] integers?
            # hack to improve performance for Dropout and CoarseDropout
            # converts mul samples to mask if mul is binomial
            if mul.dtype.kind != "b" and is_mul_binomial:
                mul = mul.astype(bool, copy=False)

            batch.images[i] = multiply_elementwise_(image, mul)

        return batch

    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.mul, self.per_channel]
# Added in 0.4.0.
class _CutoutSamples(object):
# Added in 0.4.0.
def __init__(self, nb_iterations, pos_x, pos_y, size_h, size_w, squared,
fill_mode, cval, fill_per_channel):
self.nb_iterations = nb_iterations
self.pos_x = pos_x
self.pos_y = pos_y
self.size_h = size_h
self.size_w = size_w
self.squared = squared
self.fill_mode = fill_mode
self.cval = cval
self.fill_per_channel = fill_per_channel
class Cutout(meta.Augmenter):
    """Fill one or more rectangular areas in an image using a fill mode.
    See paper "Improved Regularization of Convolutional Neural Networks with
    Cutout" by DeVries and Taylor.
    In contrast to the paper, this implementation also supports replacing
    image sub-areas with gaussian noise, random intensities or random RGB
    colors. It also supports non-squared areas. While the paper uses
    absolute pixel values for the size and position, this implementation
    uses relative values, which seems more appropriate for mixed-size
    datasets. The position parameter furthermore allows more flexibility, e.g.
    gaussian distributions around the center.
    .. note::
        This augmenter affects only image data. Other datatypes (e.g.
        segmentation map pixels or keypoints within the filled areas)
        are not affected.
    .. note::
        Gaussian fill mode will assume that float input images contain values
        in the interval ``[0.0, 1.0]`` and hence sample values from a
        gaussian within that interval, i.e. from ``N(0.5, std=0.5/3)``.
    Added in 0.4.0.
    **Supported dtypes**:
    See :func:`~imgaug.augmenters.arithmetic.cutout_`.
    Parameters
    ----------
    nb_iterations : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        How many rectangular areas to fill.
        * If ``int``: Exactly that many areas will be filled on all images.
        * If ``tuple`` ``(a, b)``: A value from the interval ``[a, b]``
          will be sampled per image.
        * If ``list``: A random value will be sampled from that ``list``
          per image.
        * If ``StochasticParameter``: That parameter will be used to
          sample ``(B,)`` values per batch of ``B`` images.
    position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
        Defines the position of each area to fill.
        Analogous to the definition in e.g.
        :class:`~imgaug.augmenters.size.CropToFixedSize`.
        Usually, ``uniform`` (anywhere in the image) or ``normal`` (anywhere
        in the image with preference around the center) are sane values.
    size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        The size of the rectangle to fill as a fraction of the corresponding
        image size, i.e. with value range ``[0.0, 1.0]``. The size is sampled
        independently per image axis.
        * If ``number``: Exactly that size is always used.
        * If ``tuple`` ``(a, b)``: A value from the interval ``[a, b]``
          will be sampled per area and axis.
        * If ``list``: A random value will be sampled from that ``list``
          per area and axis.
        * If ``StochasticParameter``: That parameter will be used to
          sample ``(N, 2)`` values per batch, where ``N`` is the total
          number of areas to fill within the whole batch.
    squared : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to generate only squared cutout areas or allow
        rectangular ones. If this evaluates to a true-like value, the
        first value from `size` will be converted to absolute pixels and used
        for both axes.
        If this value is a float ``p``, then for ``p`` percent of all areas
        to be filled `squared` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to squared areas (i.e. same as ``True``).
    fill_mode : str or list of str or imgaug.parameters.StochasticParameter, optional
        Mode to use in order to fill areas. Corresponds to ``mode`` parameter
        in some other augmenters. Valid strings for the mode are:
        * ``constant``: Fill each area with a single value.
        * ``gaussian``: Fill each area with gaussian noise.
        Valid datatypes are:
        * If ``str``: Exactly that mode will always be used.
        * If ``list``: A random value will be sampled from that ``list``
          per area.
        * If ``StochasticParameter``: That parameter will be used to
          sample ``(N,)`` values per batch, where ``N`` is the total number
          of areas to fill within the whole batch.
    cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        The value to use (i.e. the color) to fill areas if `fill_mode` is
        ``constant``.
        * If ``number``: Exactly that value is used for all areas
          and channels.
        * If ``tuple`` ``(a, b)``: A value from the interval ``[a, b]``
          will be sampled per area (and channel if ``per_channel=True``).
        * If ``list``: A random value will be sampled from that ``list``
          per area (and channel if ``per_channel=True``).
        * If ``StochasticParameter``: That parameter will be used to
          sample ``(N, Cmax)`` values per batch, where ``N`` is the total
          number of areas to fill within the whole batch and ``Cmax``
          is the maximum number of channels in any image (usually ``3``).
          If ``per_channel=False``, only the first value of the second
          axis is used.
    fill_per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to fill each area in a channelwise fashion (``True``) or
        not (``False``).
        The behaviour per fill mode is:
        * ``constant``: Whether to fill all channels with the same value
          (i.e, grayscale) or different values (i.e. usually RGB color).
        * ``gaussian``: Whether to sample once from a gaussian and use the
          values for all channels (i.e. grayscale) or to sample
          channelwise (i.e. RGB colors)
        If this value is a float ``p``, then for ``p`` percent of all areas
        to be filled `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).
    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Cutout(nb_iterations=2)
    Fill per image two random areas, by default with grayish pixels.
    >>> aug = iaa.Cutout(nb_iterations=(1, 5), size=0.2, squared=False)
    Fill per image between one and five areas, each having ``20%``
    of the corresponding size of the height and width (for non-square
    images this results in non-square areas to be filled).
    >>> aug = iaa.Cutout(fill_mode="constant", cval=255)
    Fill all areas with white pixels.
    >>> aug = iaa.Cutout(fill_mode="constant", cval=(0, 255),
    >>>                  fill_per_channel=0.5)
    Fill ``50%`` of all areas with a random intensity value between
    ``0`` and ``256``. Fill the other ``50%`` of all areas with
    random colors.
    >>> aug = iaa.Cutout(fill_mode="gaussian", fill_per_channel=True)
    Fill areas with gaussian channelwise noise (i.e. usually RGB).
    """
    # Added in 0.4.0.
    def __init__(self,
                 nb_iterations=1,
                 position="uniform",
                 size=0.2,
                 squared=True,
                 fill_mode="constant",
                 cval=128,
                 fill_per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        from .size import _handle_position_parameter  # TODO move to iap
        from .geometric import _handle_cval_arg  # TODO move to iap
        super(Cutout, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
        # Normalize all arguments to StochasticParameter instances.
        self.nb_iterations = iap.handle_discrete_param(
            nb_iterations, "nb_iterations", value_range=(0, None),
            tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
        self.position = _handle_position_parameter(position)
        self.size = iap.handle_continuous_param(
            size, "size", value_range=(0.0, 1.0+1e-4),
            tuple_to_uniform=True, list_to_choice=True)
        self.squared = iap.handle_probability_param(squared, "squared")
        self.fill_mode = self._handle_fill_mode_param(fill_mode)
        self.cval = _handle_cval_arg(cval)
        self.fill_per_channel = iap.handle_probability_param(
            fill_per_channel, "fill_per_channel")
    # Added in 0.4.0.
    @classmethod
    def _handle_fill_mode_param(cls, fill_mode):
        # Normalize `fill_mode` to a StochasticParameter that samples
        # fill mode name strings.
        if ia.is_string(fill_mode):
            assert fill_mode in _CUTOUT_FILL_MODES, (
                "Expected 'fill_mode' to be one of: %s. Got %s." % (
                    str(list(_CUTOUT_FILL_MODES.keys())), fill_mode))
            return iap.Deterministic(fill_mode)
        if isinstance(fill_mode, iap.StochasticParameter):
            return fill_mode
        assert ia.is_iterable(fill_mode), (
            "Expected 'fill_mode' to be a string, "
            "StochasticParameter or list of strings. Got type %s." % (
                type(fill_mode).__name__))
        return iap.Choice(fill_mode)
    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        if batch.images is None:
            return batch
        samples = self._draw_samples(batch.images, random_state)
        # map from xyhw to xyxy (both relative coords)
        cutout_height_half = samples.size_h / 2
        cutout_width_half = samples.size_w / 2
        x1_rel = samples.pos_x - cutout_width_half
        y1_rel = samples.pos_y - cutout_height_half
        x2_rel = samples.pos_x + cutout_width_half
        y2_rel = samples.pos_y + cutout_height_half
        # `samples` holds one row per cutout area over the whole batch;
        # [start:end] slices below select each image's areas
        nb_iterations_sum = 0
        gen = enumerate(zip(batch.images, samples.nb_iterations))
        for i, (image, nb_iterations) in gen:
            start = nb_iterations_sum
            end = start + nb_iterations
            height, width = image.shape[0:2]
            # map from relative xyxy to absolute xyxy coords
            batch.images[i] = self._augment_image_by_samples(
                image,
                x1_rel[start:end] * width,
                y1_rel[start:end] * height,
                x2_rel[start:end] * width,
                y2_rel[start:end] * height,
                samples.squared[start:end],
                samples.fill_mode[start:end],
                samples.cval[start:end],
                samples.fill_per_channel[start:end],
                random_state)
            nb_iterations_sum += nb_iterations
        return batch
    # Added in 0.4.0.
    def _draw_samples(self, images, random_state):
        # Sample all random values for the batch in one go.
        rngs = random_state.duplicate(8)
        nb_rows = len(images)
        nb_channels_max = meta.estimate_max_number_of_channels(images)
        nb_iterations = self.nb_iterations.draw_samples(
            (nb_rows,), random_state=rngs[0])
        # total number of cutout areas over all images in the batch
        nb_dropped_areas = int(np.sum(nb_iterations))
        # position is either a tuple of two parameters (x, y) or a single
        # parameter producing (x, y) pairs
        if isinstance(self.position, tuple):
            pos_x = self.position[0].draw_samples((nb_dropped_areas,),
                                                  random_state=rngs[1])
            pos_y = self.position[1].draw_samples((nb_dropped_areas,),
                                                  random_state=rngs[2])
        else:
            pos = self.position.draw_samples((nb_dropped_areas, 2),
                                             random_state=rngs[1])
            pos_x = pos[:, 0]
            pos_y = pos[:, 1]
        size = self.size.draw_samples((nb_dropped_areas, 2),
                                      random_state=rngs[3])
        squared = self.squared.draw_samples((nb_dropped_areas,),
                                            random_state=rngs[4])
        fill_mode = self.fill_mode.draw_samples(
            (nb_dropped_areas,), random_state=rngs[5])
        cval = self.cval.draw_samples((nb_dropped_areas, nb_channels_max),
                                      random_state=rngs[6])
        fill_per_channel = self.fill_per_channel.draw_samples(
            (nb_dropped_areas,), random_state=rngs[7])
        return _CutoutSamples(
            nb_iterations=nb_iterations,
            pos_x=pos_x,
            pos_y=pos_y,
            size_h=size[:, 0],
            size_w=size[:, 1],
            squared=squared,
            fill_mode=fill_mode,
            cval=cval,
            fill_per_channel=fill_per_channel
        )
    # Added in 0.4.0.
    @classmethod
    def _augment_image_by_samples(cls, image, x1, y1, x2, y2, squared,
                                  fill_mode, cval, fill_per_channel,
                                  random_state):
        # Apply one cutout_() call per sampled area, all coords absolute.
        for i, x1_i in enumerate(x1):
            x2_i = x2[i]
            if squared[i] >= 0.5:
                # force a squared area: keep the x-center, set the
                # x-extent to the (absolute) y-extent
                height_h = (y2[i] - y1[i]) / 2
                x_center = x1_i + (x2_i - x1_i) / 2
                x1_i = x_center - height_h
                x2_i = x_center + height_h
            image = cutout_(
                image,
                x1=x1_i,
                y1=y1[i],
                x2=x2_i,
                y2=y2[i],
                fill_mode=fill_mode[i],
                cval=cval[i],
                fill_per_channel=fill_per_channel[i],
                seed=random_state)
        return image
    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.nb_iterations, self.position, self.size, self.squared,
                self.fill_mode, self.cval, self.fill_per_channel]
# TODO verify that (a, b) still leads to a p being sampled per image and not
# per batch
class Dropout(MultiplyElementwise):
    """
    Set a fraction of pixels in images to zero.
    **Supported dtypes**:
    See :class:`~imgaug.augmenters.arithmetic.MultiplyElementwise`.
    Parameters
    ----------
    p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The probability of any pixel being dropped (i.e. to set it to zero).
        * If a float, then that value will be used for all images. A value
          of ``1.0`` would mean that all pixels will be dropped
          and ``0.0`` that no pixels will be dropped. A value of ``0.05``
          corresponds to ``5`` percent of all pixels being dropped.
        * If a tuple ``(a, b)``, then a value ``p`` will be sampled from
          the interval ``[a, b]`` per image and be used as the pixel's
          dropout probability.
        * If a list, then a value will be sampled from that list per
          batch and used as the probability.
        * If a ``StochasticParameter``, then this parameter will be used to
          determine per pixel whether it should be *kept* (sampled value
          of ``>0.5``) or shouldn't be kept (sampled value of ``<=0.5``).
          If you instead want to provide the probability as a stochastic
          parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
          to convert parameter `p` to a 0/1 representation.
    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).
    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.
    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.
    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Dropout(0.02)
    Drops ``2`` percent of all pixels.
    >>> aug = iaa.Dropout((0.0, 0.05))
    Drops in each image a random fraction of all pixels, where the fraction
    is uniformly sampled from the interval ``[0.0, 0.05]``.
    >>> aug = iaa.Dropout(0.02, per_channel=True)
    Drops ``2`` percent of all pixels in a channelwise fashion, i.e. it is
    unlikely for any pixel to have all channels set to zero (black pixels).
    >>> aug = iaa.Dropout(0.02, per_channel=0.5)
    Identical to the previous example, but the `per_channel` feature is only
    active for ``50`` percent of all images.
    """
    def __init__(self, p=(0.0, 0.05), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # Convert the dropout probability `p` to a Binomial-based
        # parameter that samples 1 (keep) or 0 (drop) per pixel; the
        # actual zeroing is done by MultiplyElementwise.
        p_param = _handle_dropout_probability_param(p, "p")
        super(Dropout, self).__init__(
            p_param,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
# Added in 0.4.0.
def _handle_dropout_probability_param(p, name):
    """Normalize a dropout probability argument to a StochasticParameter.

    The returned parameter samples ``1`` for "keep" and ``0`` for
    "drop", i.e. a dropout probability ``p`` is converted to
    ``Binomial(1-p)``.
    """
    if ia.is_single_number(p):
        return iap.Binomial(1 - p)
    # NOTE: the tuple check must come before the generic iterable check,
    # as tuples are iterable too but carry interval semantics here
    if isinstance(p, tuple):
        assert len(p) == 2, (
            "Expected `%s` to be given as a tuple containing exactly 2 values, "
            "got %d values." % (name, len(p),))
        assert p[0] < p[1], (
            "Expected `%s` to be given as a tuple containing exactly 2 values "
            "(a, b) with a < b. Got %.4f and %.4f." % (name, p[0], p[1]))
        assert 0 <= p[0] <= 1.0 and 0 <= p[1] <= 1.0, (
            "Expected `%s` given as tuple to only contain values in the "
            "interval [0.0, 1.0], got %.4f and %.4f." % (name, p[0], p[1]))
        return iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
    if ia.is_iterable(p):
        assert all([ia.is_single_number(v) for v in p]), (
            "Expected iterable parameter '%s' to only contain numbers, "
            "got %s." % (name, [type(v) for v in p],))
        assert all([0 <= p_i <= 1.0 for p_i in p]), (
            "Expected iterable parameter '%s' to only contain probabilities "
            "in the interval [0.0, 1.0], got values %s." % (
                name, ", ".join(["%.4f" % (p_i,) for p_i in p])))
        return iap.Binomial(1 - iap.Choice(p))
    if isinstance(p, iap.StochasticParameter):
        return p
    raise Exception(
        "Expected `%s` to be float or int or tuple (<number>, <number>) "
        "or StochasticParameter, got type '%s'." % (
            name, type(p).__name__,))
# TODO invert size_px and size_percent so that larger values denote larger
# areas being dropped instead of the opposite way around
class CoarseDropout(MultiplyElementwise):
    """
    Set rectangular areas within images to zero.
    In contrast to ``Dropout``, these areas can have larger sizes.
    (E.g. you might end up with three large black rectangles in an image.)
    Note that the current implementation leads to correlated sizes,
    so if e.g. there is any thin and high rectangle that is dropped, there is
    a high likelihood that all other dropped areas are also thin and high.
    This method is implemented by generating the dropout mask at a
    lower resolution (than the image has) and then upsampling the mask
    before dropping the pixels.
    This augmenter is similar to Cutout. Usually, cutout is defined as an
    operation that drops exactly one rectangle from an image, while here
    ``CoarseDropout`` can drop multiple rectangles (with some correlation
    between the sizes of these rectangles).
    **Supported dtypes**:
    See :class:`~imgaug.augmenters.arithmetic.MultiplyElementwise`.
    Parameters
    ----------
    p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The probability of any pixel being dropped (i.e. set to zero) in
        the lower-resolution dropout mask.
        * If a float, then that value will be used for all pixels. A value
          of ``1.0`` would mean, that all pixels will be dropped. A value
          of ``0.0`` would lead to no pixels being dropped.
        * If a tuple ``(a, b)``, then a value ``p`` will be sampled from
          the interval ``[a, b]`` per image and be used as the dropout
          probability.
        * If a list, then a value will be sampled from that list per
          batch and used as the probability.
        * If a ``StochasticParameter``, then this parameter will be used to
          determine per pixel whether it should be *kept* (sampled value
          of ``>0.5``) or shouldn't be kept (sampled value of ``<=0.5``).
          If you instead want to provide the probability as a stochastic
          parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
          to convert parameter `p` to a 0/1 representation.
    size_px : None or int or tuple of int or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the dropout
        mask in absolute pixel dimensions.
        Note that this means that *lower* values of this parameter lead to
        *larger* areas being dropped (as any pixel in the lower resolution
        image will correspond to a larger area at the original resolution).
        * If ``None`` then `size_percent` must be set.
        * If an integer, then that size will always be used for both height
          and width. E.g. a value of ``3`` would lead to a ``3x3`` mask,
          which is then upsampled to ``HxW``, where ``H`` is the image size
          and ``W`` the image width.
        * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be
          sampled from the discrete interval ``[a..b]``. The dropout mask
          will then be generated at size ``MxN`` and upsampled to ``HxW``.
        * If a ``StochasticParameter``, then this parameter will be used to
          determine the sizes. It is expected to be discrete.
    size_percent : None or float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the dropout
        mask *in percent* of the input image.
        Note that this means that *lower* values of this parameter lead to
        *larger* areas being dropped (as any pixel in the lower resolution
        image will correspond to a larger area at the original resolution).
        * If ``None`` then `size_px` must be set.
        * If a float, then that value will always be used as the percentage
          of the height and width (relative to the original size). E.g. for
          value ``p``, the mask will be sampled from ``(p*H)x(p*W)`` and
          later upsampled to ``HxW``.
        * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be
          sampled from the interval ``(a, b)`` and used as the size
          fractions, i.e the mask size will be ``(m*H)x(n*W)``.
        * If a ``StochasticParameter``, then this parameter will be used to
          sample the percentage values. It is expected to be continuous.
    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).
    min_size : int, optional
        Minimum height and width of the low resolution mask. If
        `size_percent` or `size_px` leads to a lower value than this,
        `min_size` will be used instead. This should never have a value of
        less than ``2``, otherwise one may end up with a ``1x1`` low resolution
        mask, leading easily to the whole image being dropped.
    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.
    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.
    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)
    Drops ``2`` percent of all pixels on a lower-resolution image that has
    ``50`` percent of the original image's size, leading to dropped areas that
    have roughly ``2x2`` pixels size.
    >>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))
    Generates a dropout mask at ``5`` to ``50`` percent of each input image's
    size. In that mask, ``0`` to ``5`` percent of all pixels are marked as
    being dropped. The mask is afterwards projected to the input image's
    size to apply the actual dropout operation.
    >>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))
    Same as the previous example, but the lower resolution image has ``2`` to
    ``16`` pixels size. On images of e.g. ``224x224`` pixels in size this would
    lead to fairly large areas being dropped (height/width of ``224/2`` to
    ``224/16``).
    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)
    Drops ``2`` percent of all pixels at ``50`` percent resolution (``2x2``
    sizes) in a channel-wise fashion, i.e. it is unlikely for any pixel to
    have all channels set to zero (black pixels).
    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)
    Same as the previous example, but the `per_channel` feature is only active
    for ``50`` percent of all images.
    """
    def __init__(self, p=(0.02, 0.1), size_px=None, size_percent=None,
                 per_channel=False, min_size=3,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # Wrap the keep/drop Binomial in FromLowerResolution so the 0/1
        # mask is sampled at low resolution and upsampled, producing
        # rectangular dropped areas.
        p_param = _handle_dropout_probability_param(p, "p")
        if size_px is not None:
            p_param = iap.FromLowerResolution(other_param=p_param,
                                              size_px=size_px,
                                              min_size=min_size)
        elif size_percent is not None:
            p_param = iap.FromLowerResolution(other_param=p_param,
                                              size_percent=size_percent,
                                              min_size=min_size)
        else:
            # default if neither size_px nor size_percent is provided
            # is size_px=(3, 8)
            p_param = iap.FromLowerResolution(other_param=p_param,
                                              size_px=(3, 8),
                                              min_size=min_size)
        super(CoarseDropout, self).__init__(
            p_param,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class Dropout2d(meta.Augmenter):
    """Drop random channels from images.
    For image data, dropped channels will be filled with zeros.
    .. note::
        This augmenter may also set the arrays of heatmaps and segmentation
        maps to zero and remove all coordinate-based data (e.g. it removes
        all bounding boxes on images that were filled with zeros).
        It does so if and only if *all* channels of an image are dropped.
        If ``nb_keep_channels >= 1`` then that never happens.
    Added in 0.4.0.
    **Supported dtypes**:
    * ``uint8``: yes; fully tested
    * ``uint16``: yes; tested
    * ``uint32``: yes; tested
    * ``uint64``: yes; tested
    * ``int8``: yes; tested
    * ``int16``: yes; tested
    * ``int32``: yes; tested
    * ``int64``: yes; tested
    * ``float16``: yes; tested
    * ``float32``: yes; tested
    * ``float64``: yes; tested
    * ``float128``: yes; tested
    * ``bool``: yes; tested
    Parameters
    ----------
    p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The probability of any channel to be dropped (i.e. set to zero).
        * If a ``float``, then that value will be used for all channels.
          A value of ``1.0`` would mean, that all channels will be dropped.
          A value of ``0.0`` would lead to no channels being dropped.
        * If a tuple ``(a, b)``, then a value ``p`` will be sampled from
          the interval ``[a, b)`` per batch and be used as the dropout
          probability.
        * If a list, then a value will be sampled from that list per
          batch and used as the probability.
        * If a ``StochasticParameter``, then this parameter will be used to
          determine per channel whether it should be *kept* (sampled value
          of ``>=0.5``) or shouldn't be kept (sampled value of ``<0.5``).
          If you instead want to provide the probability as a stochastic
          parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
          to convert parameter `p` to a 0/1 representation.
    nb_keep_channels : int
        Minimum number of channels to keep unaltered in all images.
        E.g. a value of ``1`` means that at least one channel in every image
        will not be dropped, even if ``p=1.0``. Set to ``0`` to allow dropping
        all channels.
    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.
    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.
    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Dropout2d(p=0.5)
    Create a dropout augmenter that drops on average half of all image
    channels. Dropped channels will be filled with zeros. At least one
    channel is kept unaltered in each image (default setting).
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Dropout2d(p=0.5, nb_keep_channels=0)
    Create a dropout augmenter that drops on average half of all image
    channels *and* may drop *all* channels in an image (i.e. images may
    contain nothing but zeros).
    """
    # Added in 0.4.0.
    def __init__(self, p=0.1, nb_keep_channels=1,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(Dropout2d, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
        self.p = _handle_dropout_probability_param(p, "p")
        self.nb_keep_channels = max(nb_keep_channels, 0)
        # Internal switches controlling which non-image data types get
        # cleared when *all* channels of an image are dropped, plus the
        # fill values used for heatmaps/segmentation maps.
        self._drop_images = True
        self._drop_heatmaps = True
        self._drop_segmentation_maps = True
        self._drop_keypoints = True
        self._drop_bounding_boxes = True
        self._drop_polygons = True
        self._drop_line_strings = True
        self._heatmaps_cval = 0.0
        self._segmentation_maps_cval = 0
    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        # drop the sampled channels of each image in-place
        imagewise_drop_channel_ids, all_dropped_ids = self._draw_samples(
            batch, random_state)
        if batch.images is not None:
            for image, drop_ids in zip(batch.images,
                                       imagewise_drop_channel_ids):
                image[:, :, drop_ids] = 0
        # Skip the non-image data steps below if we won't modify non-image
        # anyways. Minor performance improvement.
        if len(all_dropped_ids) == 0:
            return batch
        # for rows where *all* channels were dropped: zero out heatmaps
        # and segmentation maps, remove coordinate-based data
        if batch.heatmaps is not None and self._drop_heatmaps:
            cval = self._heatmaps_cval
            for drop_idx in all_dropped_ids:
                batch.heatmaps[drop_idx].arr_0to1[...] = cval
        if batch.segmentation_maps is not None and self._drop_segmentation_maps:
            cval = self._segmentation_maps_cval
            for drop_idx in all_dropped_ids:
                batch.segmentation_maps[drop_idx].arr[...] = cval
        for attr_name in ["keypoints", "bounding_boxes", "polygons",
                          "line_strings"]:
            do_drop = getattr(self, "_drop_%s" % (attr_name,))
            attr_value = getattr(batch, attr_name)
            if attr_value is not None and do_drop:
                for drop_idx in all_dropped_ids:
                    # same as e.g.:
                    # batch.bounding_boxes[drop_idx].bounding_boxes = []
                    setattr(attr_value[drop_idx], attr_name, [])
        return batch
    # Added in 0.4.0.
    def _draw_samples(self, batch, random_state):
        # maybe noteworthy here that the channel axis can have size 0,
        # e.g. (5, 5, 0)
        shapes = batch.get_rowwise_shapes()
        # pad 2D shapes with a single channel
        shapes = [shape
                  if len(shape) >= 2
                  else tuple(list(shape) + [1])
                  for shape in shapes]
        imagewise_channels = np.array([
            shape[2] for shape in shapes
        ], dtype=np.int32)
        # channelwise drop value over all images (float <0.5 = drop channel)
        p_samples = self.p.draw_samples((int(np.sum(imagewise_channels)),),
                                        random_state=random_state)
        # We map the flat p_samples array to an imagewise one,
        # convert the mask to channel-ids to drop and remove channel ids if
        # there are more to be dropped than are allowed to be dropped (see
        # nb_keep_channels).
        # We also track all_dropped_ids, which contains the ids of examples
        # (not channel ids!) where all channels were dropped.
        imagewise_channels_to_drop = []
        all_dropped_ids = []
        channel_idx = 0
        for i, nb_channels in enumerate(imagewise_channels):
            p_samples_i = p_samples[channel_idx:channel_idx+nb_channels]
            drop_ids = np.nonzero(p_samples_i < 0.5)[0]
            nb_dropable = max(nb_channels - self.nb_keep_channels, 0)
            if len(drop_ids) > nb_dropable:
                # too many channels marked for dropping: shuffle and keep
                # only as many as may be dropped
                random_state.shuffle(drop_ids)
                drop_ids = drop_ids[:nb_dropable]
            imagewise_channels_to_drop.append(drop_ids)
            all_dropped = (len(drop_ids) == nb_channels)
            if all_dropped:
                all_dropped_ids.append(i)
            channel_idx += nb_channels
        return imagewise_channels_to_drop, all_dropped_ids
    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.p, self.nb_keep_channels]
class TotalDropout(meta.Augmenter):
    """Drop all channels of a defined fraction of all images.

    For image data, all components of dropped images will be filled with
    zeros.

    .. note::

        This augmenter also sets the arrays of heatmaps and segmentation
        maps to zero and removes all coordinate-based data (e.g. it removes
        all bounding boxes on images that were filled with zeros).

    Added in 0.4.0.

    **Supported dtypes**:

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested
        * ``uint64``: yes; tested
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested
        * ``int64``: yes; tested
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: yes; tested
        * ``bool``: yes; tested

    Parameters
    ----------
    p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The probability of an image to be filled with zeros.

        * If ``float``: The value will be used for all images.
          A value of ``1.0`` would mean that all images will be set to zero.
          A value of ``0.0`` would lead to no images being set to zero.
        * If ``tuple`` ``(a, b)``: A value ``p`` will be sampled from
          the interval ``[a, b)`` per batch and be used as the dropout
          probability.
        * If a list, then a value will be sampled from that list per
          batch and used as the probability.
        * If ``StochasticParameter``: The parameter will be used to
          determine per image whether it should be *kept* (sampled value
          of ``>=0.5``) or shouldn't be kept (sampled value of ``<0.5``).
          If you instead want to provide the probability as a stochastic
          parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
          to convert parameter `p` to a 0/1 representation.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.TotalDropout(1.0)

    Create an augmenter that sets *all* components of all images to zero.

    >>> aug = iaa.TotalDropout(0.5)

    Create an augmenter that sets *all* components of ``50%`` of all images to
    zero.

    """

    # Added in 0.4.0.
    def __init__(self, p=1,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(TotalDropout, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
        self.p = _handle_dropout_probability_param(p, "p")

        # Per-column switches controlling which batch columns are affected.
        # They allow subclasses to restrict the dropout to a subset of the
        # columns.
        self._drop_images = True
        self._drop_heatmaps = True
        self._drop_segmentation_maps = True
        self._drop_keypoints = True
        self._drop_bounding_boxes = True
        self._drop_polygons = True
        self._drop_line_strings = True

        # Fill values used for dropped heatmap/segmap arrays.
        self._heatmaps_cval = 0.0
        self._segmentation_maps_cval = 0

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        mask = self._draw_samples(batch, random_state)
        ids = None

        # images: zero-out dropped rows
        if batch.images is not None and self._drop_images:
            if ia.is_np_array(batch.images):
                # single array: all dropped rows zeroed in one assignment
                batch.images[mask, ...] = 0
            else:
                ids = self._generate_drop_ids_once(mask, ids)
                for idx in ids:
                    batch.images[idx][...] = 0

        # heatmaps: fill dropped arrays with the heatmap fill value
        if batch.heatmaps is not None and self._drop_heatmaps:
            ids = self._generate_drop_ids_once(mask, ids)
            for idx in ids:
                batch.heatmaps[idx].arr_0to1[...] = self._heatmaps_cval

        # segmentation maps: fill dropped arrays with the segmap fill value
        if batch.segmentation_maps is not None \
                and self._drop_segmentation_maps:
            ids = self._generate_drop_ids_once(mask, ids)
            for idx in ids:
                batch.segmentation_maps[idx].arr[...] = \
                    self._segmentation_maps_cval

        # coordinate-based augmentables: replace their item lists with []
        for column_name in ("keypoints", "bounding_boxes", "polygons",
                            "line_strings"):
            column = getattr(batch, column_name)
            do_drop = getattr(self, "_drop_%s" % (column_name,))
            if column is not None and do_drop:
                ids = self._generate_drop_ids_once(mask, ids)
                for idx in ids:
                    # same as e.g.:
                    # batch.bounding_boxes[idx].bounding_boxes = []
                    setattr(column[idx], column_name, [])

        return batch

    # Added in 0.4.0.
    def _draw_samples(self, batch, random_state):
        # one sample per row; values below 0.5 mark rows to be dropped
        samples = self.p.draw_samples((batch.nb_rows,),
                                      random_state=random_state)
        return samples < 0.5

    # Added in 0.4.0.
    @classmethod
    def _generate_drop_ids_once(cls, drop_mask, drop_ids):
        # Lazily convert the boolean mask to indices, computing them at
        # most once per batch.
        if drop_ids is not None:
            return drop_ids
        return np.nonzero(drop_mask)[0]

    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        params = [self.p]
        return params
class ReplaceElementwise(meta.Augmenter):
    """
    Replace pixels in an image with new values.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.replace_elementwise_`.

    Parameters
    ----------
    mask : float or tuple of float or list of float or imgaug.parameters.StochasticParameter
        Mask that indicates the pixels that are supposed to be replaced.
        The mask will be binarized using a threshold of ``0.5``. A value
        of ``1`` then indicates a pixel that is supposed to be replaced.

        * If this is a float, then that value will be used as the
          probability of being a ``1`` in the mask (sampled per image and
          pixel) and hence being replaced.
        * If a tuple ``(a, b)``, then the probability will be uniformly
          sampled per image from the interval ``[a, b]``.
        * If a list, then a random value will be sampled from that list
          per image and pixel.
        * If a ``StochasticParameter``, then this parameter will be used to
          sample a mask per image.

    replacement : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        The replacement to use at all locations that are marked as ``1`` in
        the mask.

        * If this is a number, then that value will always be used as the
          replacement.
        * If a tuple ``(a, b)``, then the replacement will be sampled
          uniformly per image and pixel from the interval ``[a, b]``.
        * If a list, then a random value will be sampled from that list
          per image and pixel.
        * If a ``StochasticParameter``, then this parameter will be used to
          sample replacement values per image and pixel.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = ReplaceElementwise(0.05, [0, 255])

    Replaces ``5`` percent of all pixels in each image by either ``0``
    or ``255``.

    >>> import imgaug.augmenters as iaa
    >>> aug = ReplaceElementwise(0.1, [0, 255], per_channel=0.5)

    For ``50%`` of all images, replace ``10%`` of all pixels with either the
    value ``0`` or the value ``255`` (same as in the previous example). For
    the other ``50%`` of all images, replace *channelwise* ``10%`` of all
    pixels with either the value ``0`` or the value ``255``. So, it will be
    very rare for each pixel to have all channels replaced by ``255`` or
    ``0``.

    >>> import imgaug.augmenters as iaa
    >>> import imgaug.parameters as iap
    >>> aug = ReplaceElementwise(0.1, iap.Normal(128, 0.4*128), per_channel=0.5)

    Replace ``10%`` of all pixels by gaussian noise centered around ``128``.
    Both the replacement mask and the gaussian noise are sampled channelwise
    for ``50%`` of all images.

    >>> import imgaug.augmenters as iaa
    >>> import imgaug.parameters as iap
    >>> aug = ReplaceElementwise(
    >>>     iap.FromLowerResolution(iap.Binomial(0.1), size_px=8),
    >>>     iap.Normal(128, 0.4*128),
    >>>     per_channel=0.5)

    Replace ``10%`` of all pixels by gaussian noise centered around ``128``.
    Sample the replacement mask at a lower resolution (``8x8`` pixels) and
    upscale it to the image size, resulting in coarse areas being replaced by
    gaussian noise.

    """

    def __init__(self, mask, replacement, per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(ReplaceElementwise, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
        # Probability mask; sampled per image (and pixel) and binarized at
        # 0.5 during augmentation.
        self.mask = iap.handle_probability_param(
            mask, "mask", tuple_to_uniform=True, list_to_choice=True)
        # Replacement values; sampled per marked pixel.
        self.replacement = iap.handle_continuous_param(replacement,
                                                       "replacement")
        self.per_channel = iap.handle_probability_param(per_channel,
                                                        "per_channel")

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        # This augmenter only affects images; all other columns pass
        # through unchanged.
        if batch.images is None:
            return batch

        images = batch.images
        nb_images = len(images)
        # One RNG for the per_channel decision plus two per image
        # (one for the mask, one for the replacement values).
        rss = random_state.duplicate(1+2*nb_images)
        per_channel_samples = self.per_channel.draw_samples(
            (nb_images,), random_state=rss[0])

        # rss[1::2]/rss[2::2] interleave the mask and replacement RNGs
        # back into per-image pairs.
        gen = enumerate(zip(images, per_channel_samples, rss[1::2], rss[2::2]))
        for i, (image, per_channel_i, rs_mask, rs_replacement) in gen:
            # NOTE(review): assumes images were normalized to 3D (H, W, C)
            # upstream — a 2D image would fail to unpack here.
            height, width, nb_channels = image.shape
            # Sample the mask channelwise if per_channel is active for
            # this image, otherwise once for all channels.
            sampling_shape = (height,
                              width,
                              nb_channels if per_channel_i > 0.5 else 1)
            mask_samples = self.mask.draw_samples(sampling_shape,
                                                  random_state=rs_mask)

            # TODO add separate per_channels for mask and replacement
            # TODO add test that replacement with per_channel=False is not
            # sampled per channel
            if per_channel_i <= 0.5:
                nb_channels = image.shape[-1]
                # One replacement value per marked pixel (mask has a
                # single channel here), then repeated over all channels.
                replacement_samples = self.replacement.draw_samples(
                    (int(np.sum(mask_samples[:, :, 0])),),
                    random_state=rs_replacement)

                # important here to use repeat instead of tile. repeat
                # converts e.g. [0, 1, 2] to [0, 0, 1, 1, 2, 2], while tile
                # leads to [0, 1, 2, 0, 1, 2]. The assignment below iterates
                # over each channel and pixel simultaneously, *not* first
                # over all pixels of channel 0, then all pixels in
                # channel 1, ...
                replacement_samples = np.repeat(replacement_samples,
                                                nb_channels)
            else:
                # Channelwise mask: one replacement value per marked
                # (pixel, channel) entry.
                replacement_samples = self.replacement.draw_samples(
                    (int(np.sum(mask_samples)),), random_state=rs_replacement)

            batch.images[i] = replace_elementwise_(image, mask_samples,
                                                   replacement_samples)

        return batch

    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.mask, self.replacement, self.per_channel]
class SaltAndPepper(ReplaceElementwise):
    """
    Replace pixels in images with salt/pepper noise (white/black-ish colors).

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.ReplaceElementwise`.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of replacing a pixel to salt/pepper noise.

        * If a float, then that value will always be used as the
          probability.
        * If a tuple ``(a, b)``, then a probability will be sampled
          uniformly per image from the interval ``[a, b]``.
        * If a list, then a random value will be sampled from that list
          per image.
        * If a ``StochasticParameter``, then an image-sized mask will be
          sampled from that parameter per image. Any value ``>0.5`` in
          that mask will be replaced with salt and pepper noise.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.SaltAndPepper(0.05)

    Replace ``5%`` of all pixels with salt and pepper noise.

    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.SaltAndPepper(0.05, per_channel=True)

    Replace *channelwise* ``5%`` of all pixels with salt and pepper
    noise.

    """

    def __init__(self, p=(0.0, 0.03), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # Beta(0.5, 0.5) is U-shaped, i.e. values cluster near 0.0 and
        # 1.0. Scaled by 255 this yields black-ish (pepper) and white-ish
        # (salt) replacement values.
        replacement = iap.Beta(0.5, 0.5) * 255
        super(SaltAndPepper, self).__init__(
            mask=p,
            replacement=replacement,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class ImpulseNoise(SaltAndPepper):
    """
    Add impulse noise to images.

    This is identical to ``SaltAndPepper``, except that `per_channel` is
    always set to ``True``.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.SaltAndPepper`.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of replacing a pixel to impulse noise.

        * If a float, then that value will always be used as the
          probability.
        * If a tuple ``(a, b)``, then a probability will be sampled
          uniformly per image from the interval ``[a, b]``.
        * If a list, then a random value will be sampled from that list
          per image.
        * If a ``StochasticParameter``, then an image-sized mask will be
          sampled from that parameter per image. Any value ``>0.5`` in
          that mask will be replaced with impulse noise.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.ImpulseNoise(0.1)

    Replace ``10%`` of all pixels with impulse noise.

    """

    def __init__(self, p=(0.0, 0.03),
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # Impulse noise is channelwise salt & pepper noise, hence
        # per_channel is hard-wired to True here.
        super(ImpulseNoise, self).__init__(
            p=p,
            per_channel=True,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class CoarseSaltAndPepper(ReplaceElementwise):
    """
    Replace rectangular areas in images with white/black-ish pixel noise.

    This adds salt and pepper noise (noisy white-ish and black-ish pixels) to
    rectangular areas within the image. Note that this means that within these
    rectangular areas the color varies instead of each rectangle having only
    one color.

    See also the similar ``CoarseDropout``.

    TODO replace dtype support with uint8 only, because replacement is
         geared towards that value range

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.ReplaceElementwise`.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of changing a pixel to salt/pepper noise.

        * If a float, then that value will always be used as the
          probability.
        * If a tuple ``(a, b)``, then a probability will be sampled
          uniformly per image from the interval ``[a, b]``.
        * If a list, then a random value will be sampled from that list
          per image.
        * If a ``StochasticParameter``, then a lower-resolution mask will
          be sampled from that parameter per image. Any value ``>0.5`` in
          that mask will denote a spatial location that is to be replaced
          by salt and pepper noise.

    size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the
        replacement mask in absolute pixel dimensions.
        Note that this means that *lower* values of this parameter lead to
        *larger* areas being replaced (as any pixel in the lower resolution
        image will correspond to a larger area at the original resolution).

        * If ``None`` then `size_percent` must be set.
        * If an integer, then that size will always be used for both height
          and width. E.g. a value of ``3`` would lead to a ``3x3`` mask,
          which is then upsampled to ``HxW``, where ``H`` is the image
          height and ``W`` the image width.
        * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be
          sampled from the discrete interval ``[a..b]``. The mask
          will then be generated at size ``MxN`` and upsampled to ``HxW``.
        * If a ``StochasticParameter``, then this parameter will be used to
          determine the sizes. It is expected to be discrete.

    size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the
        replacement mask *in percent* of the input image.
        Note that this means that *lower* values of this parameter lead to
        *larger* areas being replaced (as any pixel in the lower resolution
        image will correspond to a larger area at the original resolution).

        * If ``None`` then `size_px` must be set.
        * If a float, then that value will always be used as the percentage
          of the height and width (relative to the original size). E.g. for
          value ``p``, the mask will be sampled from ``(p*H)x(p*W)`` and
          later upsampled to ``HxW``.
        * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be
          sampled from the interval ``(a, b)`` and used as the size
          fractions, i.e the mask size will be ``(m*H)x(n*W)``.
        * If a ``StochasticParameter``, then this parameter will be used to
          sample the percentage values. It is expected to be continuous.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).

    min_size : int, optional
        Minimum height and width of the low resolution mask. If
        `size_percent` or `size_px` leads to a lower value than this,
        `min_size` will be used instead. This should never have a value of
        less than ``2``, otherwise one may end up with a ``1x1`` low resolution
        mask, leading easily to the whole image being replaced.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CoarseSaltAndPepper(0.05, size_percent=(0.01, 0.1))

    Marks ``5%`` of all pixels in a mask to be replaced by salt/pepper
    noise. The mask has ``1%`` to ``10%`` the size of the input image.
    The mask is then upscaled to the input image size, leading to large
    rectangular areas being marked as to be replaced. These areas are then
    replaced in the input image by salt/pepper noise.

    >>> aug = iaa.CoarseSaltAndPepper(0.05, size_px=(4, 16))

    Same as in the previous example, but the replacement mask before upscaling
    has a size between ``4x4`` and ``16x16`` pixels (the axis sizes are sampled
    independently, i.e. the mask may be rectangular).

    >>> aug = iaa.CoarseSaltAndPepper(
    >>>     0.05, size_percent=(0.01, 0.1), per_channel=True)

    Same as in the first example, but mask and replacement are each sampled
    independently per image channel.

    """

    def __init__(self, p=(0.02, 0.1), size_px=None, size_percent=None,
                 per_channel=False, min_size=3,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        mask = iap.handle_probability_param(
            p, "p", tuple_to_uniform=True, list_to_choice=True)

        # Wrap the mask so that it is sampled at low resolution and
        # upscaled, which produces coarse rectangular replacement areas.
        # size_px takes precedence; if neither size argument was given,
        # fall back to a low-resolution size of 3 to 8 pixels per axis.
        if size_px is not None:
            size_kwargs = {"size_px": size_px}
        elif size_percent is not None:
            size_kwargs = {"size_percent": size_percent}
        else:
            size_kwargs = {"size_px": (3, 8)}
        mask_low = iap.FromLowerResolution(
            other_param=mask, min_size=min_size, **size_kwargs)

        # U-shaped Beta(0.5, 0.5) scaled to [0, 255] yields black-ish and
        # white-ish replacement values.
        replacement = iap.Beta(0.5, 0.5) * 255

        super(CoarseSaltAndPepper, self).__init__(
            mask=mask_low,
            replacement=replacement,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class Salt(ReplaceElementwise):
    """
    Replace pixels in images with salt noise, i.e. white-ish pixels.

    This augmenter is similar to ``SaltAndPepper``, but adds no pepper noise
    to images.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.ReplaceElementwise`.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of replacing a pixel with salt noise.

        * If a float, then that value will always be used as the
          probability.
        * If a tuple ``(a, b)``, then a probability will be sampled
          uniformly per image from the interval ``[a, b]``.
        * If a list, then a random value will be sampled from that list
          per image.
        * If a ``StochasticParameter``, then an image-sized mask will be
          sampled from that parameter per image. Any value ``>0.5`` in
          that mask will be replaced with salt noise.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Salt(0.05)

    Replace ``5%`` of all pixels with salt noise (white-ish colors).

    """

    def __init__(self, p=(0.0, 0.03), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # Center the U-shaped Beta(0.5, 0.5) around 0, mirror the negative
        # half into the positive range, then shift back to [0.5, 1.0].
        # After scaling, only bright (salt) replacement values remain.
        beta_centered = iap.Beta(0.5, 0.5) - 0.5
        replacement01 = iap.ForceSign(
            beta_centered,
            positive=True,
            mode="invert"
        ) + 0.5
        # FIXME max replacement seems to essentially never exceed 254
        replacement = replacement01 * 255

        super(Salt, self).__init__(
            mask=p,
            replacement=replacement,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class CoarseSalt(ReplaceElementwise):
    """
    Replace rectangular areas in images with white-ish pixel noise.

    See also the similar ``CoarseSaltAndPepper``.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.ReplaceElementwise`.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of changing a pixel to salt noise.

        * If a float, then that value will always be used as the
          probability.
        * If a tuple ``(a, b)``, then a probability will be sampled
          uniformly per image from the interval ``[a, b]``.
        * If a list, then a random value will be sampled from that list
          per image.
        * If a ``StochasticParameter``, then a lower-resolution mask will
          be sampled from that parameter per image. Any value ``>0.5`` in
          that mask will denote a spatial location that is to be replaced
          by salt noise.

    size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the
        replacement mask in absolute pixel dimensions.
        Note that this means that *lower* values of this parameter lead to
        *larger* areas being replaced (as any pixel in the lower resolution
        image will correspond to a larger area at the original resolution).

        * If ``None`` then `size_percent` must be set.
        * If an integer, then that size will always be used for both height
          and width. E.g. a value of ``3`` would lead to a ``3x3`` mask,
          which is then upsampled to ``HxW``, where ``H`` is the image
          height and ``W`` the image width.
        * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be
          sampled from the discrete interval ``[a..b]``. The mask
          will then be generated at size ``MxN`` and upsampled to ``HxW``.
        * If a ``StochasticParameter``, then this parameter will be used to
          determine the sizes. It is expected to be discrete.

    size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the
        replacement mask *in percent* of the input image.
        Note that this means that *lower* values of this parameter lead to
        *larger* areas being replaced (as any pixel in the lower resolution
        image will correspond to a larger area at the original resolution).

        * If ``None`` then `size_px` must be set.
        * If a float, then that value will always be used as the percentage
          of the height and width (relative to the original size). E.g. for
          value ``p``, the mask will be sampled from ``(p*H)x(p*W)`` and
          later upsampled to ``HxW``.
        * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be
          sampled from the interval ``(a, b)`` and used as the size
          fractions, i.e the mask size will be ``(m*H)x(n*W)``.
        * If a ``StochasticParameter``, then this parameter will be used to
          sample the percentage values. It is expected to be continuous.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).

    min_size : int, optional
        Minimum height and width of the low resolution mask. If
        `size_percent` or `size_px` leads to a lower value than this,
        `min_size` will be used instead. This should never have a value of
        less than ``2``, otherwise one may end up with a ``1x1`` low resolution
        mask, leading easily to the whole image being replaced.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CoarseSalt(0.05, size_percent=(0.01, 0.1))

    Mark ``5%`` of all pixels in a mask to be replaced by salt
    noise. The mask has ``1%`` to ``10%`` the size of the input image.
    The mask is then upscaled to the input image size, leading to large
    rectangular areas being marked as to be replaced. These areas are then
    replaced in the input image by salt noise.

    """

    def __init__(self, p=(0.02, 0.1), size_px=None, size_percent=None,
                 per_channel=False, min_size=3,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        mask = iap.handle_probability_param(
            p, "p", tuple_to_uniform=True, list_to_choice=True)

        # Wrap the mask so that it is sampled at low resolution and
        # upscaled, which produces coarse rectangular replacement areas.
        # size_px takes precedence; if neither size argument was given,
        # fall back to a low-resolution size of 3 to 8 pixels per axis.
        if size_px is not None:
            size_kwargs = {"size_px": size_px}
        elif size_percent is not None:
            size_kwargs = {"size_percent": size_percent}
        else:
            size_kwargs = {"size_px": (3, 8)}
        mask_low = iap.FromLowerResolution(
            other_param=mask, min_size=min_size, **size_kwargs)

        # Same bright-only replacement distribution as in ``Salt``: mirror
        # the negative half of (Beta(0.5, 0.5) - 0.5) into the positive
        # range and shift back to [0.5, 1.0].
        beta_centered = iap.Beta(0.5, 0.5) - 0.5
        replacement01 = iap.ForceSign(
            beta_centered,
            positive=True,
            mode="invert"
        ) + 0.5
        replacement = replacement01 * 255

        super(CoarseSalt, self).__init__(
            mask=mask_low,
            replacement=replacement,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class Pepper(ReplaceElementwise):
    """Replace pixels in images with pepper noise, i.e. black-ish pixels.

    This augmenter is similar to ``SaltAndPepper``, but adds no salt noise
    to images. It is also similar to ``Dropout``, but slower and the black
    pixels are not uniformly black.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.ReplaceElementwise`.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of replacing a pixel with pepper noise.
        A float is used directly, a tuple ``(a, b)`` is sampled uniformly
        per image from ``[a, b]``, a list leads to a random value being
        picked per image and a ``StochasticParameter`` is queried for an
        image-sized mask (values ``>0.5`` are replaced).

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to sample the replacement mask per channel (``True``) or
        once per image (``False``). A float is the probability of
        per-channel behaviour per image. A ``StochasticParameter`` must
        produce values in ``[0.0, 1.0]``, where ``>0.5`` enables
        per-channel behaviour.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`. Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0. See ``to_deterministic()`` for an
        alternative and for details about what the "deterministic mode"
        actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Pepper(0.05)

    Replace ``5%`` of all pixels with pepper noise (black-ish colors).

    """

    def __init__(self, p=(0.0, 0.05), per_channel=False,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # Pepper replacement values: Beta(0.5, 0.5) samples shifted to
        # [-0.5, 0.5], forced negative (positive=False), shifted back to
        # the lower half of [0, 1] and scaled to [0, 255] -> dark pixels.
        repl01 = iap.ForceSign(
            iap.Beta(0.5, 0.5) - 0.5,
            positive=False,
            mode="invert"
        ) + 0.5

        super(Pepper, self).__init__(
            mask=p,
            replacement=repl01 * 255,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class CoarsePepper(ReplaceElementwise):
    """Replace rectangular areas in images with black-ish pixel noise.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.ReplaceElementwise`.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of changing a pixel to pepper noise.
        A float is used directly, a tuple ``(a, b)`` is sampled uniformly
        per image from ``[a, b]``, a list leads to a random value being
        picked per image and a ``StochasticParameter`` is queried for a
        lower-resolution mask (values ``>0.5`` mark locations to replace).

    size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
        Size (in absolute pixels) of the lower-resolution image from which
        to sample the replacement mask. *Lower* values lead to *larger*
        replaced areas, as each low-resolution pixel covers more of the
        original image. An int gives a fixed ``size x size`` mask; a tuple
        ``(a, b)`` samples height and width from the discrete interval
        ``[a..b]``; a ``StochasticParameter`` is queried directly (must be
        discrete). The mask is upsampled to ``HxW``, where ``H`` is the
        image height and ``W`` the image width. If ``None``,
        `size_percent` must be set.

    size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        Size of the lower-resolution image *in percent* of the input
        image. *Lower* values lead to *larger* replaced areas. A float
        ``p`` samples the mask from ``(p*H)x(p*W)``; a tuple ``(a, b)``
        samples the two fractions uniformly from ``(a, b)``; a
        ``StochasticParameter`` is queried directly (must be continuous).
        If ``None``, `size_px` must be set.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to sample the replacement mask per channel (``True``) or
        once per image (``False``). A float is the probability of
        per-channel behaviour per image.

    min_size : int, optional
        Minimum width and height of the low-resolution mask. Used whenever
        `size_percent` or `size_px` would lead to a smaller value. Should
        never be below ``2``, otherwise a ``1x1`` mask may cause the whole
        image to be replaced.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`. Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0. See ``to_deterministic()`` for an
        alternative and for details about what the "deterministic mode"
        actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))

    Mark ``5%`` of all pixels in a mask to be replaced by pepper
    noise. The mask has ``1%`` to ``10%`` the size of the input image.
    The mask is then upscaled to the input image size, leading to large
    rectangular areas being marked as to be replaced. These areas are then
    replaced in the input image by pepper noise.

    """

    def __init__(self, p=(0.02, 0.1), size_px=None, size_percent=None,
                 per_channel=False, min_size=3,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # Per-pixel replacement probability, normalized from
        # float/tuple/list input.
        mask_param = iap.handle_probability_param(
            p, "p", tuple_to_uniform=True, list_to_choice=True)

        # Sample the mask at a lower resolution so upscaling produces
        # coarse rectangular regions. Pixel sizes win over percentages;
        # fall back to a 3..8 px grid if neither was provided.
        size_kwargs = {"size_px": (3, 8)}
        if size_px is not None:
            size_kwargs = {"size_px": size_px}
        elif size_percent is not None:
            size_kwargs = {"size_percent": size_percent}
        lowres_mask = iap.FromLowerResolution(
            other_param=mask_param, min_size=min_size, **size_kwargs)

        # Pepper replacement values: Beta(0.5, 0.5) samples forced into
        # the lower half of [0, 1], scaled to [0, 255] -> dark pixels.
        repl01 = iap.ForceSign(
            iap.Beta(0.5, 0.5) - 0.5,
            positive=False,
            mode="invert"
        ) + 0.5

        super(CoarsePepper, self).__init__(
            mask=lowres_mask,
            replacement=repl01 * 255,
            per_channel=per_channel,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class Invert(meta.Augmenter):
    """Invert all values in images, e.g. turn ``5`` into ``255-5=250``.

    For the standard value range of 0-255 it converts ``0`` to ``255``,
    ``255`` to ``0`` and ``10`` to ``(255-10)=245``.

    Let ``M`` be the maximum value possible, ``m`` the minimum value
    possible, ``v`` a value. Then the distance of ``v`` to ``m`` is
    ``d=abs(v-m)`` and the new value is given by ``v'=M-d``.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.invert_`.

    Parameters
    ----------
    p : float or imgaug.parameters.StochasticParameter, optional
        The probability of an image to be inverted. A float is used as a
        per-image probability; a ``StochasticParameter`` is queried per
        image and expected to return values in ``[0.0, 1.0]``, where
        ``>0.5`` means "invert".

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to decide per channel (``True``) or per image (``False``)
        which parts to invert. A float is the probability of per-channel
        behaviour per image.

    min_value : None or number, optional
        Minimum of the value range of input images, e.g. ``0`` for
        ``uint8``. ``None`` derives the value from the image's dtype.

    max_value : None or number, optional
        Maximum of the value range of input images, e.g. ``255`` for
        ``uint8``. ``None`` derives the value from the image's dtype.

    threshold : None or number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        A threshold to invert only values above or below it. ``None``
        disables thresholding. A number is used for all images, a tuple
        ``(a, b)`` is sampled uniformly per image from ``[a, b)``, a list
        leads to a random value per image and a ``StochasticParameter`` is
        queried once per batch of size ``N`` for ``(N,)`` samples.

    invert_above_threshold : bool or float or imgaug.parameters.StochasticParameter, optional
        If ``True``, only values ``>=threshold`` are inverted, otherwise
        only values ``<threshold``. A number in ``[0.0, 1.0]`` is an
        imagewise probability; a ``StochasticParameter`` is sampled per
        batch and interpreted as ``True`` if ``>0.5``. No effect if
        `threshold` is ``None``.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`. Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0. See ``to_deterministic()``.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Invert(0.1)

    Inverts the colors in ``10`` percent of all images.

    >>> aug = iaa.Invert(0.1, per_channel=True)

    Inverts the colors in ``10`` percent of all image channels. This may or
    may not lead to multiple channels in an image being inverted.

    >>> aug = iaa.Invert(0.1, per_channel=0.5)

    Identical to the previous example, but the `per_channel` feature is only
    active for 50 percent of all images.

    """

    # when no custom min/max are chosen, all bool, uint, int and float dtypes
    # should be invertable (float tested only up to 64bit)
    # when chosing custom min/max:
    # - bool makes no sense, not allowed
    # - int and float must be increased in resolution if custom min/max values
    #   are chosen, hence they are limited to 32 bit and below
    # - uint64 is converted by numpy's clip to float64, hence loss of accuracy
    # - float16 seems to not be perfectly accurate, but still ok-ish -- was
    #   off by 10 for center value of range (float 16 min, 16), where float
    #   16 min is around -65500
    ALLOW_DTYPES_CUSTOM_MINMAX = [
        np.dtype(dt) for dt in [
            np.uint8, np.uint16, np.uint32,
            np.int8, np.int16, np.int32,
            np.float16, np.float32
        ]
    ]

    def __init__(self, p=1, per_channel=False, min_value=None, max_value=None,
                 threshold=None, invert_above_threshold=0.5,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(Invert, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)

        # TODO allow list and tuple for p
        self.p = iap.handle_probability_param(p, "p")
        self.per_channel = iap.handle_probability_param(
            per_channel, "per_channel")
        self.min_value = min_value
        self.max_value = max_value
        # `None` disables thresholding entirely; otherwise normalize the
        # input to a continuous parameter.
        self.threshold = (
            None
            if threshold is None
            else iap.handle_continuous_param(
                threshold, "threshold", value_range=None,
                tuple_to_uniform=True, list_to_choice=True))
        self.invert_above_threshold = iap.handle_probability_param(
            invert_above_threshold, "invert_above_threshold")

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        if batch.images is None:
            return batch

        samples = self._draw_samples(batch, random_state)

        for i, image in enumerate(batch.images):
            if 0 in image.shape:
                # Skip degenerate (zero-sized) images.
                continue

            kwargs = {
                "min_value": samples.min_value[i],
                "max_value": samples.max_value[i],
                "threshold": samples.threshold[i],
                "invert_above_threshold": samples.invert_above_threshold[i]
            }

            if samples.per_channel[i]:
                # Boolean mask over channels selects which ones to invert.
                channel_mask = samples.p[i, :image.shape[2]]
                image[..., channel_mask] = invert_(
                    image[..., channel_mask], **kwargs)
            elif samples.p[i, 0]:
                # Write back in-place so batch.images[i] keeps pointing at
                # the same array object.
                image[:, :, :] = invert_(image, **kwargs)

        return batch

    # Added in 0.4.0.
    def _draw_samples(self, batch, random_state):
        nb_images = batch.nb_rows
        nb_channels = meta.estimate_max_number_of_channels(batch.images)

        # Per-image/per-channel invert decisions as boolean arrays.
        p = self.p.draw_samples(
            (nb_images, nb_channels), random_state=random_state) > 0.5
        per_channel = self.per_channel.draw_samples(
            (nb_images,), random_state=random_state) > 0.5

        if self.threshold is None:
            threshold = [None] * nb_images
        else:
            threshold = self.threshold.draw_samples(
                (nb_images,), random_state=random_state)

        invert_above_threshold = self.invert_above_threshold.draw_samples(
            (nb_images,), random_state=random_state) > 0.5

        return _InvertSamples(
            p=p,
            per_channel=per_channel,
            # min/max are static per augmenter, so just repeat them.
            min_value=[self.min_value] * nb_images,
            max_value=[self.max_value] * nb_images,
            threshold=threshold,
            invert_above_threshold=invert_above_threshold
        )

    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.p, self.per_channel, self.min_value, self.max_value,
                self.threshold, self.invert_above_threshold]
# Added in 0.4.0.
class _InvertSamples(object):
    # Plain value container bundling the per-image samples drawn by
    # Invert._draw_samples() so they can be passed around as one object.

    # Added in 0.4.0.
    def __init__(self, p, per_channel, min_value, max_value,
                 threshold, invert_above_threshold):
        # p: per-image/per-channel boolean invert decisions
        self.p = p
        # per_channel: per-image boolean flags for per-channel mode
        self.per_channel = per_channel
        # min_value/max_value: per-image value range overrides (may be None)
        self.min_value = min_value
        self.max_value = max_value
        # threshold: per-image thresholds (entries may be None)
        self.threshold = threshold
        # invert_above_threshold: per-image boolean direction flags
        self.invert_above_threshold = invert_above_threshold
class Solarize(Invert):
    """Invert all pixel values above a threshold.

    This is the same as :class:`Invert`, but sets a default threshold around
    ``128`` (+/- 64, decided per image) and default `invert_above_threshold`
    to ``True`` (i.e. only values above the threshold will be inverted).

    See :class:`Invert` for more details.

    Added in 0.4.0.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.arithmetic.Invert`.

    Parameters
    ----------
    p : float or imgaug.parameters.StochasticParameter
        See :class:`Invert`.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        See :class:`Invert`.

    min_value : None or number, optional
        See :class:`Invert`.

    max_value : None or number, optional
        See :class:`Invert`.

    threshold : None or number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        See :class:`Invert`.

    invert_above_threshold : bool or float or imgaug.parameters.StochasticParameter, optional
        See :class:`Invert`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`. Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0. See ``to_deterministic()``.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Solarize(0.5, threshold=(32, 128))

    Invert the colors in ``50`` percent of all images for pixels with a
    value between ``32`` and ``128`` or more. The threshold is sampled once
    per image. The thresholding operation happens per channel.

    """

    def __init__(self, p=1, per_channel=False, min_value=None, max_value=None,
                 threshold=(128-64, 128+64), invert_above_threshold=True,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # Pure convenience wrapper: delegate everything to Invert, only the
        # defaults for `threshold` and `invert_above_threshold` differ.
        super(Solarize, self).__init__(
            p=p,
            per_channel=per_channel,
            min_value=min_value,
            max_value=max_value,
            threshold=threshold,
            invert_above_threshold=invert_above_threshold,
            seed=seed,
            name=name,
            random_state=random_state,
            deterministic=deterministic)
# TODO remove from examples
@ia.deprecated("imgaug.contrast.LinearContrast")
def ContrastNormalization(alpha=1.0, per_channel=False,
                          seed=None, name=None,
                          random_state="deprecated",
                          deterministic="deprecated"):
    """
    Change the contrast of images.

    **Supported dtypes**:

    See ``imgaug.augmenters.contrast.LinearContrast``.

    Deprecated since 0.3.0.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Strength of the contrast normalization. Higher values than 1.0
        lead to higher contrast, lower values decrease the contrast.

            * If a number, then that value will be used for all images.
            * If a tuple ``(a, b)``, then a value will be sampled per image
              uniformly from the interval ``[a, b]`` and be used as the alpha
              value.
            * If a list, then a random value will be picked per image from
              that list.
            * If a ``StochasticParameter``, then this parameter will be used to
              sample the alpha value per image.

    per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
        Whether to use (imagewise) the same sample(s) for all
        channels (``False``) or to sample value(s) for each channel (``True``).
        Setting this to ``True`` will therefore lead to different
        transformations per image *and* channel, otherwise only per image.
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as ``True``.
        If it is a ``StochasticParameter`` it is expected to produce samples
        with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
        lead to per-channel behaviour (i.e. same as ``True``).

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`. Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0. See ``to_deterministic()`` for an
        alternative and for details about what the "deterministic mode"
        actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> iaa.ContrastNormalization((0.5, 1.5))

    Decreases or improves contrast per image by a random factor between
    ``0.5`` and ``1.5``. The factor ``0.5`` means that any difference from
    the center value (i.e. 128) will be halved, leading to less contrast.

    >>> iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)

    Same as before, but for 50 percent of all images the normalization is done
    independently per channel (i.e. factors can vary per channel for the same
    image). In the other 50 percent of all images, the factor is the same for
    all channels.

    """
    # pylint: disable=invalid-name
    # placed here to avoid cyclic dependency
    from . import contrast as contrast_lib
    return contrast_lib.LinearContrast(
        alpha=alpha, per_channel=per_channel,
        seed=seed, name=name,
        random_state=random_state, deterministic=deterministic)
# TODO try adding per channel somehow
class JpegCompression(meta.Augmenter):
    """Degrade the quality of images by JPEG-compressing them.

    During JPEG compression, high frequency components (e.g. edges) are
    removed. With low compression (strength) only the highest frequency
    components are removed, while very high compression (strength) will
    lead to only the lowest frequency components "surviving". This lowers
    the image quality.

    For more details, see https://en.wikipedia.org/wiki/Compression_artifact.

    Note that this augmenter still returns images as numpy arrays (i.e.
    saves the images with JPEG compression and then reloads them into
    arrays). It does not return the raw JPEG file content.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.arithmetic.compress_jpeg`.

    Parameters
    ----------
    compression : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Degree of compression in the value range ``[0, 100]``. Higher
        values denote stronger compression and cause low-frequency
        components to disappear. Note that JPEG's compression strength is
        often set as a *quality*, which is the inverse of this parameter:
        common quality settings of 80 to 95 correspond to a compression of
        roughly 20 to 5 here. A number is used directly, a tuple
        ``(a, b)`` is sampled uniformly per image from ``[a, b]``, a list
        leads to a random value per image and a ``StochasticParameter``
        yields one sample per image.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`. Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0. See ``to_deterministic()``.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.JpegCompression(compression=(70, 99))

    Remove high frequency components in images via JPEG compression with
    a *compression strength* between ``70`` and ``99`` (randomly and
    uniformly sampled per image). This corresponds to a (very low) *quality*
    setting of ``1`` to ``30``.

    """

    def __init__(self, compression=(0, 100),
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(JpegCompression, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)

        # Compression is cast to int during augmentation, which is why
        # floats are accepted here.
        self.compression = iap.handle_continuous_param(
            compression, "compression",
            value_range=(0, 100), tuple_to_uniform=True, list_to_choice=True)

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        if batch.images is None:
            return batch

        # One compression strength per image.
        strengths = self.compression.draw_samples(
            (len(batch.images),), random_state=random_state)

        for i, image in enumerate(batch.images):
            batch.images[i] = compress_jpeg(image, int(strengths[i]))

        return batch

    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.compression]
| aleju/ImageAugmenter | imgaug/augmenters/arithmetic.py | Python | mit | 205,414 | [
"Gaussian"
] | 0f4a49f6fa392f6e7ea89cf37a0f4393615959a00e24472f2f7854a8054aa7c2 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .script_interface import ScriptObjectRegistry, ScriptInterfaceHelper, script_interface_register
from .__init__ import has_features
# The boundary classes only exist when ESPResSo was compiled with
# lattice-Boltzmann boundary support (CPU or GPU feature flag).
if any(has_features(i) for i in ["LB_BOUNDARIES", "LB_BOUNDARIES_GPU"]):
    @script_interface_register
    class LBBoundaries(ScriptObjectRegistry):

        """
        Creates a set of lattice-Boltzmann boundaries.

        """

        _so_name = "LBBoundaries::LBBoundaries"

        def add(self, *args, **kwargs):
            """
            Adds a boundary to the set of boundaries.
            Either pass a valid boundary as argument,
            or a valid set of parameters to create a boundary.

            """
            # A single positional argument must already be an LBBoundary;
            # otherwise a new one is constructed from the keyword args.
            if len(args) == 1:
                if isinstance(args[0], LBBoundary):
                    lbboundary = args[0]
                else:
                    raise TypeError(
                        "Either a LBBoundary object or key-value pairs for the parameters of a LBBoundary object need to be passed.")
            else:
                lbboundary = LBBoundary(**kwargs)
            # Register with the core and return the (possibly new) object
            # so callers can keep a handle for later removal.
            self.call_method("add", object=lbboundary)
            return lbboundary

        def remove(self, lbboundary):
            """
            Removes a boundary from the set.

            Parameters
            ----------
            lbboundary : :obj:`LBBoundary`
                The boundary to be removed from the set.

            """
            self.call_method("remove", object=lbboundary)

        def clear(self):
            """
            Removes all boundaries.

            """
            self.call_method("clear")

        def size(self):
            # Number of boundaries currently registered in the core.
            return self.call_method("size")

        def empty(self):
            # True if no boundaries are registered.
            return self.call_method("empty")

    @script_interface_register
    class LBBoundary(ScriptInterfaceHelper):

        """
        Creates a LB boundary from a shape.

        The fluid velocity is limited to :math:`v_{\\mathrm{max}} = 0.20`
        (see *quasi-incompressible limit* in :cite:`kruger17a`,
        chapter 7, page 272), which corresponds to Mach 0.35.

        The relative error in the fluid density between a compressible fluid
        and an incompressible fluid at Mach 0.30 is less than 5% (see
        *constant density assumption* in :cite:`kundu01a` chapter 16, page
        663). Since the speed of sound is :math:`c_s = 1 / \\sqrt{3}` in LB
        velocity units in a D3Q19 lattice, the velocity limit at Mach 0.30
        is :math:`v_{\\mathrm{max}} = 0.30 / \\sqrt{3} \\approx 0.17`.
        At Mach 0.35 the relative error is around 6% and
        :math:`v_{\\mathrm{max}} = 0.35 / \\sqrt{3} \\approx 0.20`.

        Parameters
        ----------
        shape : :obj:`espressomd.shapes.Shape`
            The shape from which to build the boundary.
        velocity : (3,) array_like of :obj:`float`, optional
            The boundary slip velocity. By default, a velocity of zero is used
            (no-slip boundary condition).

        """
        _so_name = "LBBoundaries::LBBoundary"
        # Expose the core-side get_force() method on the Python object.
        _so_bind_methods = ("get_force",)
| espressomd/espresso | src/python/espressomd/lbboundaries.py | Python | gpl-3.0 | 3,773 | [
"ESPResSo"
] | 985b36e318b2198cbf7a622fb2a064f52f300bec4eced7c19c778966ddf57ede |
'''
@author: David W.H. Swenson
'''
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from past.utils import old_div
from builtins import object
import os
from nose.tools import (assert_equal, assert_not_equal, assert_almost_equal)
from nose.plugins.skip import SkipTest
import openpathsampling as paths
import openpathsampling.engines.toy as toy
from .test_helpers import (true_func, assert_equal_array_array,
assert_items_equal)
import numpy as np
# =========================================================================
# This single test module includes all the tests for the toy_dynamics
# subpackage.
# =========================================================================
def setup_module():
    """Create the module-level fixtures shared by all test classes below."""
    # set up globals
    global gaussian, linear, outer, harmonic
    gaussian = toy.Gaussian(6.0, [2.5, 40.0], [0.8, 0.5])
    outer = toy.OuterWalls([1.12, 2.0], [0.2, -0.25])
    linear = toy.LinearSlope([1.5, 0.75], 0.5)
    harmonic = toy.HarmonicOscillator([1.5, 2.0], [0.5, 3.0], [0.25, 0.75])
    # Shared initial conditions: 2D positions/velocities and per-dof masses.
    global init_pos, init_vel, sys_mass
    init_pos = np.array([0.7, 0.65])
    init_vel = np.array([0.6, 0.5])
    sys_mass = np.array([1.5, 1.5])
# === TESTS FOR TOY POTENTIAL ENERGY SURFACES =============================
class TestHarmonicOscillator(object):
    """Check the harmonic oscillator PES against hand-computed values."""

    def setup(self):
        # Minimal stand-in for a toy snapshot: the PES only reads these
        # three attributes, so the test class itself can play that role.
        self.positions = init_pos
        self.velocities = init_vel
        self.mass = sys_mass

    def test_V(self):
        # k = m * omega^2 = [1.5, 1.5] * [0.5, 3.0]^2 = [0.375, 13.5]
        # V = 0.5*(1.5*0.375*((0.7)-0.25)^2 + 2.0*13.5*((0.65)-0.75)^2)
        #   = 0.191953125
        assert_almost_equal(harmonic.V(self), 0.191953125)

    def test_dVdx(self):
        # [1.5, 2.0] * [1.5, 1.5] * [0.5, 3.0]^2 * [(0.7)-0.25, (0.65)-0.75]
        # = [1.5*1.5*0.5^2*((0.7)-0.25), 2.0*1.5*3.0^2*((0.65)-0.75)]
        # = [0.253125, -2.7]
        expected = [0.253125, -2.7]
        for computed, reference in zip(harmonic.dVdx(self), expected):
            assert_almost_equal(computed, reference)
class TestGaussian(object):
    """Check the Gaussian PES against hand-computed values."""

    def setup(self):
        # The PES treats this test object as a snapshot; it only needs
        # positions, velocities and mass.
        self.positions = init_pos
        self.velocities = init_vel
        self.mass = sys_mass

    def test_V(self):
        # 6.0*exp(-2.5*((0.7)-0.8)^2-40.0*((0.65)-0.5)^2) = 2.37918851445
        assert_almost_equal(gaussian.V(self), 2.37918851445)

    def test_dVdx(self):
        gradient = gaussian.dVdx(self)
        # exp(-2.5*((0.7)-0.8)^2-40*((0.65)-0.5)^2)*(-30*(0.7)+24)
        assert_almost_equal(gradient[0], 1.18959425722)
        # -480*((0.65)-0.5)*exp(-2.5*((0.7)-0.8)^2-40*((0.65)-0.5)^2)
        assert_almost_equal(gradient[1], -28.5502621734)
class TestOuterWalls(object):
    """Checks toy.OuterWalls (sixth-power confining walls) values."""
    def setup(self):
        # the test object itself serves as the snapshot read by the PES
        self.positions = init_pos
        self.velocities = init_vel
        self.mass = sys_mass
    def test_V(self):
        # 1.12*(0.7-0.2)^6 + 2.0*(0.65-(-0.25))^6
        assert_almost_equal(outer.V(self), 1.080382)
    def test_dVdx(self):
        gradient = outer.dVdx(self)
        # 6*1.12*(0.7-0.2)^5
        assert_almost_equal(gradient[0], 0.21)
        # 6*2.0*(0.65-(-0.25))^5
        assert_almost_equal(gradient[1], 7.08588)
class TestLinearSlope(object):
    """Checks toy.LinearSlope: V = m.x + c with a constant gradient."""
    def setup(self):
        # the test object itself serves as the snapshot read by the PES
        self.positions = init_pos
        self.velocities = init_vel
        self.mass = sys_mass
    def test_V(self):
        # 1.5*0.7 + 0.75*0.65 + 0.5 = 2.0375
        assert_almost_equal(linear.V(self), 2.0375)
    def test_dVdx(self):
        # gradient of a linear slope is just its coefficient vector
        assert_equal(linear.dVdx(self), [1.5, 0.75])
class TestCombinations(object):
    """Checks arithmetic combinations (+, -) of toy PES objects."""
    # reference values taken from the individual PES tests above
    GAUSS_V = 2.37918851445
    GAUSS_DVDX = (1.18959425722, -28.5502621734)
    OUTER_V = 1.080382
    OUTER_DVDX = (0.21, 7.08588)
    LINEAR_V = 2.0375
    LINEAR_DVDX = (1.5, 0.75)
    def setup(self):
        self.positions = init_pos
        self.velocities = init_vel
        self.mass = sys_mass
        self.simpletest = gaussian + gaussian
        self.fullertest = gaussian + outer - linear
    def test_V(self):
        assert_almost_equal(self.simpletest.V(self), 2 * self.GAUSS_V)
        assert_almost_equal(self.fullertest.V(self),
                            self.GAUSS_V + self.OUTER_V - self.LINEAR_V)
    def test_dVdx(self):
        for axis in (0, 1):
            assert_almost_equal(self.simpletest.dVdx(self)[axis],
                                2 * self.GAUSS_DVDX[axis])
            assert_almost_equal(self.fullertest.dVdx(self)[axis],
                                self.GAUSS_DVDX[axis] + self.OUTER_DVDX[axis]
                                - self.LINEAR_DVDX[axis])
    def test_kinetic_energy(self):
        # KE = 0.5*sum(m*v^2) = 0.5*(1.5*0.6^2 + 1.5*0.5^2) = 0.4575
        assert_almost_equal(self.simpletest.kinetic_energy(self), 0.4575)
# === TESTS FOR TOY ENGINE OBJECT =========================================
class Test_convert_fcn(object):
    """Tests for the helper that reshapes flat coordinate arrays to (N, 3)."""
    def test_convert_to_3Ndim(self):
        # bug fix: the parameter was named `v` instead of `self`, which
        # silently received the instance and obscured the method's intent.
        # NOTE(review): the test is explicitly skipped; the assertions below
        # document the intended zero-padding/reshaping of convert_to_3Ndim.
        raise SkipTest
        assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0]),
                                 np.array([[1.0, 2.0, 0.0]]))
        assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0, 3.0]),
                                 np.array([[1.0, 2.0, 3.0]]))
        assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0, 3.0, 4.0]),
                                 np.array([[1.0, 2.0, 3.0], [4.0, 0.0, 0.0]]))
class TestToyEngine(object):
    """Tests for the toy dynamics engine: sanity, snapshot get/set, and
    trajectory generation."""
    def setup(self):
        pes = linear
        integ = toy.LeapfrogVerletIntegrator(dt=0.002)
        topology=toy.Topology(
            n_spatial = 2,
            masses = sys_mass,
            pes = pes
        )
        options={
            'integ' : integ,
            'n_frames_max' : 5}
        sim = toy.Engine(options=options,
                         topology=topology
                        )
        # NOTE(review): `template` is unused, and its velocities are built
        # from init_pos rather than init_vel -- looks like a copy-paste slip;
        # confirm before relying on it.
        template = toy.Snapshot(
            coordinates=init_pos.copy(),
            velocities=init_pos.copy(),
            engine=sim
        )
        sim.positions = init_pos.copy()
        sim.velocities = init_vel.copy()
        sim.n_steps_per_frame = 10
        self.sim = sim
    def teardown(self):
        # remove any netCDF file left behind by storage-related runs
        if os.path.isfile('toy_tmp.nc'):
            os.remove('toy_tmp.nc')
    def test_sanity(self):
        # engine internals must reflect the topology masses and options
        assert_items_equal(self.sim._mass, sys_mass)
        assert_items_equal(self.sim._minv, [old_div(1.0,m_i) for m_i in sys_mass])
        assert_equal(self.sim.n_steps_per_frame, 10)
    def test_snapshot_timestep(self):
        # dt * n_steps_per_frame = 0.002 * 10
        assert_equal(self.sim.snapshot_timestep, 0.02)
    def test_snapshot_get(self):
        snapshot = self.sim.current_snapshot
        assert_items_equal(snapshot.velocities[0],
                           self.sim.velocities)
        assert_items_equal(snapshot.coordinates[0],
                           self.sim.positions)
    def test_snapshot_set(self):
        # setting current_snapshot must overwrite engine positions/velocities
        snap = toy.Snapshot(coordinates=np.array([[1,2,3]]),
                            velocities=np.array([[4,5,6]]))
        self.sim.current_snapshot = snap
        assert_items_equal(self.sim.positions, [1,2,3])
        assert_items_equal(self.sim.velocities, [4,5,6])
    def test_generate_next_frame(self):
        # we test correctness by integrating forward, then backward
        assert_items_equal(self.sim.positions, init_pos)
        assert_items_equal(self.sim.velocities, init_vel)
        snap = self.sim.generate_next_frame()
        #assert_equal_array_array(snap.coordinates,
        #np.array([init_pos.append(0.0)]))
        # reverse the velocities: deterministic dynamics must return to start
        self.sim.velocities = -self.sim.velocities
        snap2 = self.sim.generate_next_frame()
        np.testing.assert_allclose(snap2.coordinates[0], init_pos)
    def test_generate(self):
        # generation must abort with an error once n_frames_max is exceeded
        self.sim.initialized = True
        try:
            traj = self.sim.generate(self.sim.current_snapshot, [true_func])
        except paths.engines.EngineMaxLengthError as e:
            traj = e.last_trajectory
            assert_equal(len(traj), self.sim.n_frames_max)
        else:
            raise RuntimeError('Did not raise MaxLength Error')
    def test_generate_n_frames(self):
        # generate() and generate_n_frames() must produce identical dynamics
        self.sim.initialized = True
        ens = paths.LengthEnsemble(4) # first snap plus n_frames
        orig = self.sim.current_snapshot.copy()
        traj1 = self.sim.generate(self.sim.current_snapshot, [ens.can_append])
        self.sim.current_snapshot = orig
        traj2 = [orig] + self.sim.generate_n_frames(3)
        assert_equal(len(traj1), len(traj2))
        for (s1, s2) in zip(traj1, traj2):
            # snapshots are not the same object
            assert_not_equal(s1, s2)
            # however, they have the same values stored in them
            assert_equal(len(s1.coordinates), 1)
            assert_equal(len(s1.coordinates[0]), 2)
            assert_items_equal(s1.coordinates[0], s2.coordinates[0])
            assert_items_equal(s1.velocities[0], s2.velocities[0])
    def test_start_with_snapshot(self):
        # smoke test: start/stop accept an explicit snapshot
        snap = toy.Snapshot(coordinates=np.array([1,2]),
                            velocities=np.array([3,4]))
        self.sim.start(snapshot=snap)
        self.sim.stop([snap])
    def test_has_constraints(self):
        # the toy engine never applies constraints
        assert not self.sim.has_constraints()
# === TESTS FOR TOY INTEGRATORS ===========================================
class TestLeapfrogVerletIntegrator(object):
    """Tests the deterministic leapfrog Verlet integrator step-by-step
    against hand-computed updates on the linear PES."""
    def setup(self):
        pes = linear
        integ = toy.LeapfrogVerletIntegrator(dt=0.002)
        topology=toy.Topology(
            n_spatial = 2,
            masses = sys_mass,
            pes = pes
        )
        options={
            'integ' : integ,
            'n_frames_max' : 5}
        sim = toy.Engine(options=options,
                         topology=topology
                        )
        # NOTE(review): `template` is unused and its velocities come from
        # init_pos rather than init_vel -- looks like a copy-paste slip.
        template = toy.Snapshot(
            coordinates=init_pos.copy(),
            velocities=init_pos.copy(),
            engine=sim
        )
        sim.positions = init_pos.copy()
        sim.velocities = init_vel.copy()
        sim.n_steps_per_frame = 10
        self.sim = sim
    def test_momentum_update(self):
        self.sim.integ._momentum_update(self.sim, 0.002)
        # velocities = init_vel - pes.dVdx(init_pos)/m*dt
        #            = [0.6, 0.5] - [1.5, 0.75]/[1.5, 1.5] * 0.002
        #            = [0.598, 0.499]
        assert_equal(self.sim.velocities[0], 0.598)
        assert_equal(self.sim.velocities[1], 0.499)
    def test_position_update(self):
        self.sim.integ._position_update(self.sim, 0.002)
        # positions = init_pos + velocities * dt
        #           = [0.7, 0.65] + [0.6, 0.5]*0.002 = [0.7012, 0.651]
        assert_almost_equal(self.sim.positions[0], 0.7012)
        assert_almost_equal(self.sim.positions[1], 0.651)
    def test_step(self):
        # no assertions since the tests of position/momentum updates should
        # handle that... this is just to make sure we run
        self.sim.integ.step(self.sim)
        self.sim.integ.step(self.sim)
class TestLangevinBAOABIntegrator(object):
    '''Testing for correctness is hard, since this is a stochastic
    calculation. However, we can at least run tests to make sure nothing
    crashes.'''
    def setup(self):
        pes = linear
        integ = toy.LangevinBAOABIntegrator(dt=0.002, temperature=0.5,
                                            gamma=1.0)
        topology=toy.Topology(
            n_spatial = 2,
            masses = sys_mass,
            pes = pes
        )
        options={
            'integ' : integ,
            'n_frames_max' : 5}
        sim = toy.Engine(options=options,
                         topology=topology
                        )
        # NOTE(review): `template` is unused and its velocities come from
        # init_pos rather than init_vel -- looks like a copy-paste slip.
        template = toy.Snapshot(
            coordinates=init_pos.copy(),
            velocities=init_pos.copy(),
            engine=sim
        )
        sim.positions = init_pos.copy()
        sim.velocities = init_vel.copy()
        sim.n_steps_per_frame = 10
        self.sim = sim
    def test_OU_update(self):
        # we can't actually test for correct values, but we *can* test that
        # some things *don't* happen
        assert_equal(self.sim.velocities[0], init_vel[0])
        assert_equal(self.sim.velocities[1], init_vel[1])
        # the Ornstein-Uhlenbeck (O) sub-step must randomize both components
        self.sim.integ._OU_update(self.sim, 0.002)
        assert_not_equal(self.sim.velocities[0], init_vel[0])
        assert_not_equal(self.sim.velocities[1], init_vel[1])
        # tests that the same random number wasn't used for both:
        assert_not_equal(self.sim.velocities[0] - init_vel[0],
                         self.sim.velocities[1] - init_vel[1])
    def test_step(self):
        # smoke test: a full frame of BAOAB steps must run without error
        self.sim.generate_next_frame()
| choderalab/openpathsampling | openpathsampling/tests/test_toy_dynamics.py | Python | lgpl-2.1 | 12,312 | [
"Gaussian"
] | a5c1cb5da2872799a16a8f4d7a57e55658be9b8d351b239b5dfb47a96d5c4e03 |
'''The fitting module define some classes to easily perform 1D curve
fitting. This module supports fitting (x, y) data with a general
mechanism. Any fitting function can be provided to the fit method
and a few general purpose fuctions are predefined:
* Gaussian
* Lorentzian
* Cosine
* Voigt
.. figure:: _static/fitting_functions.png
:width: 400 px
:height: 300 px
:alt: lattice_3d
:align: center
Plot of the main predefined fitting function in the fitting module.
'''
from scipy import optimize
from scipy.special import wofz
import numpy as np
def fit(y, x=None, expression=None, nb_params=None, init=None):
    '''Static method to perform curve fitting directly.

    *Parameters*

    **y**: the data to match (a 1d numpy array)

    **x**: the corresponding x coordinates (optional, None by default)

    **expression**: can be either a string to select a predefined
    function ('Gaussian', 'Lorentzian', 'Cosine' or 'Voigt') or
    alternatively a user defined function with the signature f(x, p)
    (in this case you must specify the length of the parameters
    array p via setting nb_params).

    **nb_params**: the number of parameters of the user defined fitting
    function (only needed when a custom fitting function is provided,
    None by default)

    **init**: a sequence (the length must be equal to the number of
    parameters of the fitting function) used to initialise the fitting
    function.

    *Returns* the fitted FitFunction instance, or None when a custom
    expression was given without nb_params.

    For instance, to fit some (x,y) data with a gaussian function, simply use:
    ::

      F = fit(y, x, expression='Gaussian')

    Alternatively you may specify you own function directly defined with Python, like:
    ::

      def myf(x, p):
          return p[0]*x + p[1]

      F = fit(y, x, expression=myf, nb_params=2)
    '''
    # predefined fit functions, selected by name
    if expression == 'Gaussian':
        F = Gaussian()
    elif expression == 'Lorentzian':
        F = Lorentzian()
    elif expression == 'Cosine':
        F = Cosine()
    elif expression == 'Voigt':
        F = Voigt()
    else:
        # user supplied callable: wrap it in a generic FitFunction
        F = FitFunction()
        if not nb_params:
            print('please specify the number of parameters for your fit function, aborting fit...')
            return None
        # NOTE(review): `if not init` raises for a numpy array argument
        # (ambiguous truth value); presumably init is a list/tuple -- confirm.
        if not init:
            init = np.ones(nb_params)
        # NOTE(review): this branch also triggers when *fewer* values are
        # supplied than nb_params, despite what the message says.
        if not len(init) == nb_params:
            print(
                'there are more parameters in the fit function than specified in the initialization sequence, aborting initialization...')
            init = np.ones(nb_params)
        for i in range(nb_params):
            F.add_parameter(init[i], 'p%d' % i)
        F.expression = expression
    F.fit(y, x)
    return F
def lin_reg(xi, yi):
    """Apply linear regression to a series of points.

    This function returns the best linear fit in the least square sense.

    :param ndarray xi: a 1D array of the x coordinate.
    :param ndarray yi: a 1D array of the y coordinate.
    :return tuple: the linear intercept, slope and correlation coefficient.
    """
    n = len(xi)
    assert (n == len(yi))
    # first- and second-order sums entering the normal equations
    sum_x = np.sum(xi)
    sum_y = np.sum(yi)
    sum_xx = np.sum(xi ** 2)
    sum_xy = np.sum(xi * yi)
    sum_yy = np.sum(yi ** 2)
    # covariance-like numerator shared by the slope and by r
    cov_term = n * sum_xy - sum_x * sum_y
    var_x = n * sum_xx - sum_x ** 2
    var_y = n * sum_yy - sum_y ** 2
    slope = cov_term / var_x
    intercept = 1. / n * sum_y - 1. / n * sum_x * slope
    # Pearson correlation coefficient of the fit
    r = cov_term / np.sqrt(var_x * var_y)
    return intercept, slope, r
class Parameter:
    '''A named, modifiable scalar value used by the fit functions.'''

    def __init__(self, value, name=None):
        '''Create a parameter holding `value`, optionally labelled `name`.'''
        self.value = value
        self.name = name

    def set(self, value):
        '''Assign a new value to the parameter.'''
        self.value = value

    def set_name(self, name):
        '''Assign a new name to the parameter.'''
        self.name = name

    def __call__(self):
        '''Allow `p()` as a shorthand for reading the current value.'''
        return self.value

    def __repr__(self):
        '''Represent the parameter by its current value only.'''
        return str(self.value)
class FitFunction:
    '''This class provides a basic canvas to define a fit function.

    You may subclass it to create your own fitting function just as the
    predefined fit functions do (see `Gaussian` for instance). A subclass
    must provide `self.expression`, a callable with signature f(x, p) where
    p is the list of `Parameter` instances.
    '''

    def __init__(self):
        self.parameters = []
        self.expression = None

    def get_parameters(self):
        '''Return the list of parameters of this fit function.'''
        return self.parameters

    def get_parameter_names(self):
        '''Return the list of parameter names of this fit function.'''
        # bug fix: the method must be *called*; iterating over the bound
        # method object (`self.get_parameters`) raised a TypeError.
        return [p.name for p in self.get_parameters()]

    def add_parameter(self, value, name):
        '''Append a new `Parameter` with the given value and name.'''
        param = Parameter(value, name)
        self.parameters.append(param)

    def __call__(self, x):
        '''With this we can call directly f(x) to evaluate the function f at x.'''
        return self.compute(x)

    def __repr__(self):
        '''Provide a string representation of the fitting function, giving
        its type and the list of its parameters with names and values.'''
        s = '%s function\n' % self.__class__.__name__
        s += 'Parameters are:\n'
        for param in self.get_parameters():
            s += ' * %s = %g\n' % (param.name, param.value)
        return s

    def compute(self, x):
        '''Evaluate the fit function at coordinates x.'''
        p = self.get_parameters()
        return self.expression(x, p)

    def fit(self, y, x=None, verbose=False):
        '''Perform fitting on the given data.

        This will adjust the parameters of this fit function to match as
        well as possible the given data using a least square minimisation.

        *Parameters*

        **y**: the data to match (a 1d numpy array)

        **x**: the corresponding x coordinates (optional, None by default;
        defaults to range(len(y)))

        **verbose**: boolean, activate verbose mode
        '''
        # iteration counter; kept module-global for backward compatibility
        global it
        it = 0

        def cost_func(new_params):
            # residual vector whose sum of squares leastsq minimizes
            global it
            p = self.get_parameters()
            if verbose:
                print('iteration %d, trying parameters:' % it, p)
            it += 1
            # push the candidate parameter values into the Parameter objects
            for i, pi in enumerate(p):
                pi.set(new_params[i])
            return y - self(x)

        if x is None:
            x = np.arange(y.shape[0])
        p = [param.value for param in self.get_parameters()]
        optimize.leastsq(cost_func, p, Dfun=None, xtol=1.e-6)
class SumOfFitFunction(FitFunction):
    # A fit function built as the sum of several other fit functions.
    # The parameters of all summed functions are concatenated and exposed
    # on this object. Note that here `expression` holds the *list* of
    # component functions (not a callable as in FitFunction); `compute`
    # overrides the base implementation accordingly.
    def __init__(self, function_list):
        self.parameters = []
        for f in function_list:
            self.parameters.extend(f.get_parameters())
        self.expression = function_list
    def __repr__(self):
        '''Provide a string representation of the fitting function, giving
        its type and the list of its parameters with names and values.'''
        s = '%s function\n' % self.__class__.__name__
        s += 'list of function in the sum:'
        for f in self.expression:
            s += f.__repr__()
        return s
    def compute(self, x):
        '''Evaluate the fit function at coordinates x.'''
        # NOTE(review): np.zeros_like(x) inherits x's dtype; with integer x
        # the accumulation would truncate -- presumably x is float; confirm.
        result = np.zeros_like(x)
        for f in self.expression:
            result += f.compute(x)
        return result
class Gaussian(FitFunction):
    '''Gaussian fit function: height * exp(-((x - position) / sigma)**2).

    Parameters (in order): position, sigma, height.
    '''

    def __init__(self, position=0.0, sigma=1.0, height=1.0):
        FitFunction.__init__(self)

        def gauss_expr(x, p):
            # p[0] = position, p[1] = sigma, p[2] = height
            return p[2].value * np.exp(-((x - p[0].value) / p[1].value) ** 2)

        self.expression = gauss_expr
        self.add_parameter(position, 'position')
        self.add_parameter(sigma, 'sigma')
        self.add_parameter(height, 'height')

    def set_position(self, position):
        '''Set the position (center) of the gauss function.'''
        self.parameters[0].set(position)

    def set_sigma(self, sigma):
        '''Set the width (variance) of the gauss function.'''
        self.parameters[1].set(sigma)

    def set_height(self, height):
        '''Set the maximum (height) of the gauss function.'''
        self.parameters[2].set(height)

    def fwhm(self):
        '''Compute the full width at half maximum of the gauss function.'''
        # for this parameterization, FWHM = 2 * sigma * sqrt(ln 2)
        sigma = self.get_parameters()[1].value
        return 2 * sigma * np.sqrt(np.log(2))
class Lorentzian(FitFunction):
    '''Lorentzian funtion.

    The first parameter is the position, the second is gamma. The maximum
    of the function is given by height_factor/(pi*gamma). The FWHM is just
    2*gamma.
    '''

    def __init__(self, position=0.0, gamma=1.0, height_factor=1.0):
        FitFunction.__init__(self)

        def lorentz_expr(x, p):
            # p[0] = position, p[1] = gamma, p[2] = height_factor
            numerator = p[2].value * p[1].value / np.pi
            return numerator / ((x - p[0].value) ** 2 + p[1].value ** 2)

        self.expression = lorentz_expr
        self.add_parameter(position, 'position')
        self.add_parameter(gamma, 'width')
        self.add_parameter(height_factor, 'height_factor')

    def set_position(self, position):
        '''Set the position (center) of the Lorentzian function.'''
        self.parameters[0].set(position)

    def set_gamma(self, gamma):
        '''Set the gamma (half width) parameter.'''
        self.parameters[1].set(gamma)

    def set_height(self, height):
        '''Set the maximum (height) of the Lorentzian function. This
        actually set the height factor to the value height*pi*gamma.
        '''
        gamma = self.parameters[1].value
        self.parameters[2].set(height * np.pi * gamma)

    def fwhm(self):
        '''Full width at half maximum: simply 2*gamma.'''
        return 2 * self.get_parameters()[1].value
class Cosine(FitFunction):
    '''Cosine fit function: cos(pi*(x - position) / (2*width)).

    First parameter is position, second is width.
    '''

    def __init__(self, position=0.0, width=1.0):
        FitFunction.__init__(self)

        def cosine_expr(x, p):
            # p[0] = position, p[1] = width
            return np.cos(np.pi * (x - p[0].value) / (2 * p[1].value))

        self.expression = cosine_expr
        self.add_parameter(position, 'position')
        self.add_parameter(width, 'width')

    def set_position(self, position):
        '''Set the position (center) of the cosine function.'''
        self.parameters[0].set(position)

    def set_width(self, a):
        '''Set the width of the cosine function.'''
        self.parameters[1].set(a)

    def fwhm(self):
        '''Full width at half maximum: 3/4 of the width parameter.'''
        return 3. / 4 * self.get_parameters()[1].value
class Voigt(FitFunction):
    '''The Voigt function is also the real part of
    w(x) = exp(-x**2) erfc(ix), the Faddeeva function.

    Here we use one of the popular implementation which is available
    in scipy with the wofz function.

    Parameters (in order): position, sigma (Gaussian part), gamma
    (Lorentzian part) and height_factor.
    '''
    def __init__(self, position=0.0, sigma=1.0, gamma=1.0, height_factor=1.0):
        FitFunction.__init__(self)
        def V(x, p):
            # evaluate the Faddeeva function at the complex reduced variable
            z = (x - p[0].value + 1j * p[2].value) / (p[1].value * np.sqrt(2))
            return p[3].value * wofz(z).real / (p[1].value * np.sqrt(2 * np.pi))
        self.expression = V
        self.add_parameter(position, 'position')
        self.add_parameter(sigma, 'sigma')
        self.add_parameter(gamma, 'gamma')
        self.add_parameter(height_factor, 'height_factor')
    def set_position(self, position):
        '''Set the position (center) of the Voigt function.'''
        self.parameters[0].set(position)
    def set_sigma(self, sigma):
        '''Set the sigma of the Voigt function.'''
        self.parameters[1].set(sigma)
    def set_height(self, height):
        '''Set the maximum (height) of the Voigt function. This
        actually set the height factor to the proper value. Be careful that
        if you change the other parameters (sigma, gamma) the maximum height
        will be changed.
        '''
        # the peak occurs at x = position; rescale the height factor so that
        # the evaluated maximum equals the requested height
        maxi = self.compute(self.parameters[0].value)
        self.parameters[3].set(height / maxi)
    def fwhm(self):
        '''Compute the full width at half maximum of the Voigt function.

        The height factor does not change the fwhm. The fwhm can be evaluated
        by the width of the associated Gaussian and Lorentzian functions.
        The fwhm is approximated by the equation from J. Olivero and R. Longbothum [1977]'''
        p = self.get_parameters()
        # Gaussian and Lorentzian FWHM contributions
        fg = 2 * p[1].value * np.sqrt(2 * np.log(2))
        fl = 2 * p[2].value
        return 0.5346 * fl + np.sqrt(0.2166 * fl ** 2 + fg ** 2)
| heprom/pymicro | pymicro/xray/fitting.py | Python | mit | 12,900 | [
"Gaussian"
] | d1396a337338b8690c48fc4bef48976dc5240a5e870af8b59ff4ad12a28105bd |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.symmetry.bandstructure import HighSymmKpath
test_dir_structs = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', 'space_group_structs')
class HighSymmKpathTest(PymatgenTest):

    def test_kpath_generation(self):
        """Generate a k-path for one structure in every space group; any
        exception raised by HighSymmKpath fails the test."""
        # spacegroup-number ranges for each crystal system
        triclinic = [1, 2]
        monoclinic = range(3, 16)
        orthorhombic = range(16, 75)
        tetragonal = range(75, 143)
        rhombohedral = range(143, 168)
        hexagonal = range(168, 195)
        cubic = range(195, 231)

        species = ['K', 'La', 'Ti']
        coords = [[.345, 5, .77298], [.1345, 5.1, .77298], [.7, .8, .9]]
        for sg_num in range(1, 231):
            # choose a lattice consistent with the crystal system
            if sg_num in triclinic:
                lattice = Lattice([[3.0233057319441246, 0, 0], [0, 7.9850357844548681, 0], [0, 0, 8.1136762279561818]])
            elif sg_num in monoclinic:
                lattice = Lattice.monoclinic(2, 9, 1, 99)
            elif sg_num in orthorhombic:
                lattice = Lattice.orthorhombic(2, 9, 1)
            elif sg_num in tetragonal:
                lattice = Lattice.tetragonal(2, 9)
            elif sg_num in rhombohedral:
                lattice = Lattice.hexagonal(2, 95)
            elif sg_num in hexagonal:
                lattice = Lattice.hexagonal(2, 9)
            elif sg_num in cubic:
                lattice = Lattice.cubic(2)
            struct = Structure.from_spacegroup(sg_num, lattice, species, coords)
            # throws an error if something doesn't work, causing test to fail
            kpath = HighSymmKpath(struct)

        struct_file_path = os.path.join(test_dir_structs, 'ICSD_170.cif')
        struct = Structure.from_file(struct_file_path)
        hkp = HighSymmKpath(struct)
        self.assertEqual(hkp.name, 'MCLC5')

    def test_kpath_acentered(self):
        """Check the exact k-point labels and fractional coordinates for an
        A-centered orthorhombic structure (space group 38)."""
        species = ['K', 'La', 'Ti']
        coords = [[.345, 5, .77298], [.1345, 5.1, .77298], [.7, .8, .9]]
        lattice = Lattice.orthorhombic(2, 9, 1)
        struct = Structure.from_spacegroup(38, lattice, species, coords)
        kpath = HighSymmKpath(struct)

        kpoints = kpath._kpath['kpoints']
        labels = list(kpoints.keys())
        expected = {
            '\\Gamma': (0.00000000, 0.00000000, 0.00000000),
            'A': (0.25308642, 0.25308642, 0.50000000),
            'A_1': (-0.25308642, 0.74691358, 0.50000000),
            'R': (0.00000000, 0.50000000, 0.50000000),
            'S': (0.00000000, 0.50000000, 0.00000000),
            'T': (-0.50000000, 0.50000000, 0.50000000),
            'X': (0.25308642, 0.25308642, 0.00000000),
            'X_1': (-0.25308642, 0.74691358, 0.00000000),
            'Y': (-0.50000000, 0.50000000, 0.00000000),
            'Z': (0.00000000, 0.00000000, 0.50000000),
        }
        self.assertEqual(sorted(labels), sorted(expected.keys()))
        # the Gamma point must sit exactly at the origin
        self.assertEqual(kpoints['\\Gamma'][0], 0.00000000)
        for label in expected:
            for axis in range(3):
                self.assertAlmostEqual(kpoints[label][axis],
                                       expected[label][axis])
if __name__ == "__main__":
unittest.main()
| fraricci/pymatgen | pymatgen/symmetry/tests/test_kpaths.py | Python | mit | 4,487 | [
"pymatgen"
] | f12f859b4b72d2187d17b6cf96e131556064d1a7a02736618aa92692d272976e |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
    """Principal component analysis using the covariance method.
    Returns the eigenvectors (projection matrix), the eigenvalues
    (variances), the per-row mean, the centered data matrix and the
    covariance matrix.
    """
    #get dimensions
    num_data,dim = X.shape
    #center data
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract the mean (along columns)
    Mcov = cov(M)
    ###### Sanity Check ######
    # count NaN entries: x != x is only true for NaN values
    # NOTE(review): the 82x140 bounds are hard-coded to this data set;
    # confirm they match X.shape before reuse.
    i=0
    n=0
    while i < 82:
        j=0
        while j < 140:
            if X[i,j] != X[i,j]:
                print X[i,j]
                print i,j
                n=n+1
            j = j+1
        i=i+1
    print n
    ##########################
    print 'PCA - COV-Method used'
    # eigen-decomposition of the covariance matrix
    val,vec = linalg.eig(Mcov)
    #return the projection matrix, the variance and the mean
    return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
    # stack the fixed (rows 0-40) and movable (rows 41-81) object features
    Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[41:82,:]])
    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot
    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)
    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    W = eigvec_total[:,0:7]
    m_W, n_W = np.shape(W)
    print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
    # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    i = 0
    while i < length:
        s[i] = sqrt(C[i,i])
        i = i+1
    Z = np.divide(B,s)
    m_Z, n_Z = np.shape(Z)
    print 'Z-Score Shape:', m_Z, n_Z
    #Projected Data:
    Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
    m_Y, n_Y = np.shape(Y.T)
    print 'Transposed Projected Data Shape:', m_Y, n_Y
    #Using PYMVPA
    # 28 object classes, 5 samples each; labels must align with Fmat rows
    PCA_data = np.array(Y.T)
    PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
    # 1-nearest-neighbour classifier with leave-one-out cross-validation
    clf = kNN(k=1)
    terr = TransferError(clf)
    ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
    print ds1.samples.shape
    cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
    error = cvterr(ds1)
    print error
    print cvterr.confusion.asstring(description=False)
    figure(1)
    cvterr.confusion.plot(numbers='True',numbers_alpha=2)
    #show()
    # Variances
    figure(2)
    title('Variances of PCs')
    stem(range(len(perc_total)),perc_total,'--b')
    axis([-0.3,130.3,0,1.2])
    grid('True')
    show()
| tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/results/test10_cross_validate_objects_1200ms_scaled_method_v_force_area.py | Python | mit | 4,597 | [
"Mayavi"
] | 7276b57c98914bfae6791ba6f15fbe1198c46eeb5e83397d099fbd6d63ad939b |
#
# Copyright (C) 2000 greg Landrum
#
""" handles doing cross validation with neural nets
This is, perhaps, a little misleading. For the purposes of this module,
cross validation == evaluating the accuracy of a net.
"""
from rdkit.ML.Neural import Network, Trainers
from rdkit.ML.Data import SplitData
import math
def CrossValidate(net, testExamples, tolerance, appendExamples=0):
  """ Determines the classification error for the testExamples

    **Arguments**

      - net: a neural net (or anything supporting a _ClassifyExample()_ method)

      - testExamples: a list of examples to be used for testing; the last
        entry of each example is the true result

      - tolerance: the largest allowed deviation between the true result
        and the net output

      - appendExamples: a toggle which is ignored, it's just here to maintain
        the same API as the decision tree code.

    **Returns**

      a 2-tuple consisting of:

        1) the percent error of the net

        2) a list of misclassified examples

    **Note**

      At the moment, this is specific to nets with only one output

  """
  # an example is misclassified when the net output deviates from the true
  # result (the example's last entry) by more than the tolerance
  badExamples = [example for example in testExamples
                 if math.fabs(example[-1] - net.ClassifyExample(example)) > tolerance]
  return float(len(badExamples)) / len(testExamples), badExamples
def CrossValidationDriver(examples, attrs=None, nPossibleVals=None, holdOutFrac=.3, silent=0,
                          tolerance=0.3, calcTotalError=0, hiddenSizes=None, **kwargs):
  """ Trains a neural net on a random subset of the examples and evaluates it.

    **Arguments**

      - examples: the full set of examples

      - attrs: a list of attributes to consider in the tree building
        *This argument is ignored* (bug fix: the default was a shared
        mutable list, a classic Python pitfall; it is now None)

      - nPossibleVals: a list of the number of possible values each variable can adopt
        *This argument is ignored* (default changed from [] to None, see above)

      - holdOutFrac: the fraction of the data which should be reserved for the hold-out set
        (used to calculate the error)

      - silent: a toggle used to control how much visual noise this makes as it goes.

      - tolerance: the tolerance for convergence of the net

      - calcTotalError: if this is true the entire data set is used to calculate
        accuracy of the net

      - hiddenSizes: a list containing the size(s) of the hidden layers in the network.
        if _hiddenSizes_ is None, one hidden layer containing the same number of nodes
        as the input layer will be used

    **Returns**

       a 2-tuple containing:

         1) the net

         2) the cross-validation error of the net

    **Note**

      At the moment, this is specific to nets with only one output

  """
  nTot = len(examples)
  # split indices into hold-out (test) and training sets, optionally with
  # replacement (bootstrap-style selection)
  if not kwargs.get('replacementSelection', 0):
    testIndices, trainIndices = SplitData.SplitIndices(nTot, holdOutFrac, silent=1, legacy=1,
                                                       replacement=0)
  else:
    testIndices, trainIndices = SplitData.SplitIndices(nTot, holdOutFrac, silent=1, legacy=0,
                                                       replacement=1)
  trainExamples = [examples[x] for x in trainIndices]
  testExamples = [examples[x] for x in testIndices]
  nTrain = len(trainExamples)
  if not silent:
    print('Training with %d examples' % (nTrain))
  # network topology: inputs are every column except the last (the target)
  nInput = len(examples[0]) - 1
  nOutput = 1
  if hiddenSizes is None:
    nHidden = nInput
    netSize = [nInput, nHidden, nOutput]
  else:
    netSize = [nInput] + hiddenSizes + [nOutput]
  net = Network.Network(netSize)
  t = Trainers.BackProp()
  t.TrainOnLine(trainExamples, net, errTol=tolerance, useAvgErr=0, silent=silent)
  nTest = len(testExamples)
  if not silent:
    print('Testing with %d examples' % nTest)
  # evaluate either on the hold-out set or on the full data set
  if not calcTotalError:
    xValError, _ = CrossValidate(net, testExamples, tolerance)
  else:
    xValError, _ = CrossValidate(net, examples, tolerance)
  if not silent:
    print('Validation error was %%%4.2f' % (100 * xValError))
  # remember which examples were used for training
  net._trainIndices = trainIndices
  return net, xValError
| bp-kelley/rdkit | rdkit/ML/Neural/CrossValidate.py | Python | bsd-3-clause | 3,924 | [
"RDKit"
] | f05b9b707cea5508662675578b051954a751366df13a3c816c26519da7f69bb6 |
import unittest
import random
import string
import sys
from pyokc import pyokc
class TestSequenceFunctions(unittest.TestCase):
    """Integration tests for pyokc, run against two live accounts."""
    @classmethod
    def setUpClass(cls):
        # log both accounts in once for the whole test class
        cls.u1 = pyokc.User(USERNAME1, PASSWORD1)
        cls.u2 = pyokc.User(USERNAME2, PASSWORD2)
    def test_search_and_visit(self):
        # search results must respect the filters and come back sorted by
        # descending match percentage
        profiles = self.u1.search(location="New York, NY", number=30,
                                  age_min=25, age_max=30,
                                  looking_for="straight girls only",
                                  status="single", last_online=86400,
                                  religion="agnostic", monogamy="monogamous")
        self.assertTrue(len(profiles) <= 30)
        prev_match_percentage = 100
        for p in profiles[:3]:
            self.assertTrue(prev_match_percentage >= p.match)
            # bug fix: the running maximum was assigned to an unused variable
            # (`prev_match`), so the ordering check always compared against 100
            prev_match_percentage = p.match
            self.u1.visit(p)
            self.assertTrue(25 <= p.age <= 30)
            self.assertEqual(p.gender, 'Female')
            self.assertEqual(p.orientation, 'Straight')
            self.assertEqual(p.status, 'Single')
            self.assertTrue(p.details['last online'] == 'Online now!' or
                            "Today" in p.details['last online'] or
                            "Yesterday" in p.details['last online'])
            self.assertIn('monogamous', p.details['relationship type'].lower())
    def test_rating(self):
        # a rating set on a profile must be visible when visiting it again
        rating = random.choice((0, 5))
        self.u1.rate(self.u2.username, rating)
        p = self.u1.visit(self.u2.username)
        self.assertEqual(rating, p.rating)
    def test_age(self):
        self.assertIsInstance(self.u1.age, int)
        self.assertTrue(18 <= self.u1.age <= 99)
    def test_gender(self):
        self.assertIn(self.u1.gender, ['Male', 'Female'])
    def test_orientation(self):
        self.assertIn(self.u1.orientation, ['Straight', 'Bisexual', 'Gay'])
    def test_status(self):
        self.assertTrue(len(self.u1.status) and isinstance(self.u1.status, str))
    def test_messaging(self):
        # a freshly sent random message must show up in the sender's outbox
        mtext = ''.join(random.choice(string.ascii_letters) for i in range(30))
        self.u1.message(self.u2.username, mtext)
        self.u1.update_mailbox('outbox', pages=1)
        m1 = self.u1.outbox[0]
        self.u1.read(m1)
        # the first 4 characters are the mailbox prefix added by pyokc
        self.assertEqual(m1.messages[0][4:], mtext)
if __name__ == '__main__':
    # Two username/password pairs must come first on the command line;
    # they are stripped from argv so unittest.main() doesn't see them.
    if len(sys.argv) < 5:
        sys.exit("ERROR: Two usernames/passwords must be supplied for these tests")
    USERNAME1, PASSWORD1, USERNAME2, PASSWORD2 = sys.argv[1:5]
    del sys.argv[1:5]
    unittest.main()
"VisIt"
] | 26f7fb06f6acf8ad348b9c9e8892c25f08399a74ca1b1c0c5a30850f1741abfb |
"""
Views for the verification flow
"""
import datetime
import decimal
import json
import logging
import urllib
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View, RedirectView
import analytics
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from commerce.utils import audit_log
from course_modes.models import CourseMode
from courseware.url_helpers import get_redirect_url
from edx_rest_api_client.exceptions import SlumberBaseException
from edxmako.shortcuts import render_to_response, render_to_string
from embargo import api as embargo_api
from microsite_configuration import microsite
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
from openedx.core.djangoapps.user_api.accounts.api import update_account_settings
from openedx.core.djangoapps.user_api.errors import UserNotFound, AccountValidationError
from openedx.core.djangoapps.credit.api import set_credit_requirement_status
from student.models import CourseEnrollment
from shoppingcart.models import Order, CertificateItem
from shoppingcart.processors import (
get_signed_purchase_params, get_purchase_endpoint
)
from verify_student.ssencrypt import has_valid_signature
from verify_student.models import (
VerificationDeadline,
SoftwareSecurePhotoVerification,
VerificationCheckpoint,
VerificationStatus,
IcrvStatusEmailsConfiguration,
)
from verify_student.image import decode_image_data, InvalidImageData
from util.json_request import JsonResponse
from util.date_utils import get_default_time_display
from xmodule.modulestore.django import modulestore
from django.contrib.staticfiles.storage import staticfiles_storage
log = logging.getLogger(__name__)
class PayAndVerifyView(View):
    """
    View for the "verify and pay" flow.
    This view is somewhat complicated, because the user
    can enter it from a number of different places:
    * From the "choose your track" page.
    * After completing payment.
    * From the dashboard in order to complete verification.
    * From the dashboard in order to upgrade to a verified track.
    The page will display different steps and requirements
    depending on:
    * Whether the user has submitted a photo verification recently.
    * Whether the user has paid for the course.
    * How the user reached the page (mostly affects messaging)
    We are also super-paranoid about how users reach this page.
    If they somehow aren't enrolled, or the course doesn't exist,
    or they've unenrolled, or they've already paid/verified,
    ... then we try to redirect them to the page with the
    most appropriate messaging (including the dashboard).
    Note that this page does NOT handle re-verification
    (photo verification that was denied or had an error);
    that is handled by the "reverify" view.
    """
    # Step definitions
    #
    # These represent the numbered steps a user sees in
    # the verify / payment flow.
    #
    # Steps can either be:
    # - displayed or hidden
    # - complete or incomplete
    #
    # For example, when a user enters the verification/payment
    # flow for the first time, the user will see steps
    # for both payment and verification.  As the user
    # completes these steps (for example, submitting a photo)
    # the steps will be marked "complete".
    #
    # If a user has already verified for another course,
    # then the verification steps will be hidden,
    # since the user has already completed them.
    #
    # If a user re-enters the flow from another application
    # (for example, after completing payment through
    # a third-party payment processor), then the user
    # will resume the flow at an intermediate step.
    #
    INTRO_STEP = 'intro-step'
    MAKE_PAYMENT_STEP = 'make-payment-step'
    PAYMENT_CONFIRMATION_STEP = 'payment-confirmation-step'
    FACE_PHOTO_STEP = 'face-photo-step'
    ID_PHOTO_STEP = 'id-photo-step'
    REVIEW_PHOTOS_STEP = 'review-photos-step'
    ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step'
    # Ordered list of every step in the flow; order determines display order.
    ALL_STEPS = [
        INTRO_STEP,
        MAKE_PAYMENT_STEP,
        PAYMENT_CONFIRMATION_STEP,
        FACE_PHOTO_STEP,
        ID_PHOTO_STEP,
        REVIEW_PHOTOS_STEP,
        ENROLLMENT_CONFIRMATION_STEP
    ]
    # Subset of steps hidden once the user has already paid.
    PAYMENT_STEPS = [
        MAKE_PAYMENT_STEP,
        PAYMENT_CONFIRMATION_STEP
    ]
    # Subset of steps hidden once the user has already verified.
    VERIFICATION_STEPS = [
        FACE_PHOTO_STEP,
        ID_PHOTO_STEP,
        REVIEW_PHOTOS_STEP,
        ENROLLMENT_CONFIRMATION_STEP
    ]
    # These steps can be skipped using the ?skip-first-step GET param
    SKIP_STEPS = [
        INTRO_STEP,
    ]
    STEP_TITLES = {
        INTRO_STEP: ugettext_lazy("Intro"),
        MAKE_PAYMENT_STEP: ugettext_lazy("Make payment"),
        PAYMENT_CONFIRMATION_STEP: ugettext_lazy("Payment confirmation"),
        FACE_PHOTO_STEP: ugettext_lazy("Take photo"),
        ID_PHOTO_STEP: ugettext_lazy("Take a photo of your ID"),
        REVIEW_PHOTOS_STEP: ugettext_lazy("Review your info"),
        ENROLLMENT_CONFIRMATION_STEP: ugettext_lazy("Enrollment confirmation"),
    }
    # Messages
    #
    # Depending on how the user entered reached the page,
    # we will display different text messaging.
    # For example, we show users who are upgrading
    # slightly different copy than users who are verifying
    # for the first time.
    #
    FIRST_TIME_VERIFY_MSG = 'first-time-verify'
    VERIFY_NOW_MSG = 'verify-now'
    VERIFY_LATER_MSG = 'verify-later'
    UPGRADE_MSG = 'upgrade'
    PAYMENT_CONFIRMATION_MSG = 'payment-confirmation'
    # Requirements
    #
    # These explain to the user what he or she
    # will need to successfully pay and/or verify.
    #
    # These are determined by the steps displayed
    # to the user; for example, if the user does not
    # need to complete the verification steps,
    # then the photo ID and webcam requirements are hidden.
    #
    ACCOUNT_ACTIVATION_REQ = "account-activation-required"
    PHOTO_ID_REQ = "photo-id-required"
    WEBCAM_REQ = "webcam-required"
    STEP_REQUIREMENTS = {
        ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ],
        FACE_PHOTO_STEP: [WEBCAM_REQ],
    }
    # Deadline types
    VERIFICATION_DEADLINE = "verification"
    UPGRADE_DEADLINE = "upgrade"
    @method_decorator(login_required)
    def get(
        self, request, course_id,
        always_show_payment=False,
        current_step=None,
        message=FIRST_TIME_VERIFY_MSG
    ):
        """
        Render the payment and verification flow.
        Arguments:
            request (HttpRequest): The request object.
            course_id (unicode): The ID of the course the user is trying
                to enroll in.
        Keyword Arguments:
            always_show_payment (bool): If True, show the payment steps
                even if the user has already paid.  This is useful
                for users returning to the flow after paying.
            current_step (string): The current step in the flow.
            message (string): The messaging to display.
        Returns:
            HttpResponse
        Raises:
            Http404: The course does not exist or does not
                have a verified mode.
        """
        # Parse the course key
        # The URL regex should guarantee that the key format is valid.
        course_key = CourseKey.from_string(course_id)
        course = modulestore().get_course(course_key)
        # Verify that the course exists
        if course is None:
            log.warn(u"Could not find course with ID %s.", course_id)
            raise Http404
        # Check whether the user has access to this course
        # based on country access rules.
        redirect_url = embargo_api.redirect_if_blocked(
            course_key,
            user=request.user,
            ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            return redirect(redirect_url)
        # If the verification deadline has passed
        # then show the user a message that he/she can't verify.
        #
        # We're making the assumptions (enforced in Django admin) that:
        #
        # 1) Only verified modes have verification deadlines.
        #
        # 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you
        # let someone upgrade into a verified track if they can't complete verification?
        #
        verification_deadline = VerificationDeadline.deadline_for_course(course.id)
        response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline)
        if response is not None:
            log.info(u"Verification deadline for '%s' has passed.", course.id)
            return response
        # Retrieve the relevant course mode for the payment/verification flow.
        #
        # WARNING: this is technical debt!  A much better way to do this would be to
        # separate out the payment flow and use the product SKU to figure out what
        # the user is trying to purchase.
        #
        # Nonetheless, for the time being we continue to make the really ugly assumption
        # that at some point there was a paid course mode we can query for the price.
        relevant_course_mode = self._get_paid_mode(course_key)
        # If we can find a relevant course mode, then log that we're entering the flow
        # Otherwise, this course does not support payment/verification, so respond with a 404.
        if relevant_course_mode is not None:
            if CourseMode.is_verified_mode(relevant_course_mode):
                log.info(
                    u"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.",
                    request.user.id, course_id, current_step
                )
            else:
                log.info(
                    u"Entering payment flow for user '%s', course '%s', with current step '%s'",
                    request.user.id, course_id, current_step
                )
        else:
            # Otherwise, there has never been a verified/paid mode,
            # so return a page not found response.
            log.warn(
                u"No paid/verified course mode found for course '%s' for verification/payment flow request",
                course_id
            )
            raise Http404
        # If the user is trying to *pay* and the upgrade deadline has passed,
        # then they shouldn't be able to enter the flow.
        #
        # NOTE: This should match the availability dates used by the E-Commerce service
        # to determine whether a user can purchase a product.  The idea is that if the service
        # won't fulfill the order, we shouldn't even let the user get into the payment flow.
        #
        user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG]
        if user_is_trying_to_pay:
            upgrade_deadline = relevant_course_mode.expiration_datetime
            response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline)
            if response is not None:
                log.info(u"Upgrade deadline for '%s' has passed.", course.id)
                return response
        # Check whether the user has verified, paid, and enrolled.
        # A user is considered "paid" if he or she has an enrollment
        # with a paid course mode (such as "verified").
        # For this reason, every paid user is enrolled, but not
        # every enrolled user is paid.
        # If the course mode is not verified(i.e only paid) then already_verified is always True
        already_verified = (
            self._check_already_verified(request.user)
            if CourseMode.is_verified_mode(relevant_course_mode)
            else True
        )
        already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
        # Redirect the user to a more appropriate page if the
        # messaging won't make sense based on the user's
        # enrollment / payment / verification status.
        redirect_response = self._redirect_if_necessary(
            message,
            already_verified,
            already_paid,
            is_enrolled,
            course_key
        )
        if redirect_response is not None:
            return redirect_response
        display_steps = self._display_steps(
            always_show_payment,
            already_verified,
            already_paid,
            relevant_course_mode
        )
        requirements = self._requirements(display_steps, request.user.is_active)
        if current_step is None:
            current_step = display_steps[0]['name']
        # Allow the caller to skip the first page
        # This is useful if we want the user to be able to
        # use the "back" button to return to the previous step.
        # This parameter should only work for known skip-able steps
        if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS:
            display_step_names = [step['name'] for step in display_steps]
            current_step_idx = display_step_names.index(current_step)
            if (current_step_idx + 1) < len(display_steps):
                current_step = display_steps[current_step_idx + 1]['name']
        # Link back to the courseware only once the course has started.
        courseware_url = ""
        if not course.start or course.start < datetime.datetime.today().replace(tzinfo=UTC):
            courseware_url = reverse(
                'course_root',
                kwargs={'course_id': unicode(course_key)}
            )
        # Pre-fill the name-change field with the user's current profile name.
        full_name = (
            request.user.profile.name
            if request.user.profile.name
            else ""
        )
        # If the user set a contribution amount on another page,
        # use that amount to pre-fill the price selection form.
        contribution_amount = request.session.get(
            'donation_for_course', {}
        ).get(unicode(course_key), '')
        # Remember whether the user is upgrading
        # so we can fire an analytics event upon payment.
        request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG)
        # Determine the photo verification status
        verification_good_until = self._verification_valid_until(request.user)
        # get available payment processors
        if relevant_course_mode.sku:
            # transaction will be conducted via ecommerce service
            processors = ecommerce_api_client(request.user).payment.processors.get()
        else:
            # transaction will be conducted using legacy shopping cart
            processors = [settings.CC_PROCESSOR_NAME]
        # Render the top-level page
        context = {
            'contribution_amount': contribution_amount,
            'course': course,
            'course_key': unicode(course_key),
            'checkpoint_location': request.GET.get('checkpoint'),
            'course_mode': relevant_course_mode,
            'courseware_url': courseware_url,
            'current_step': current_step,
            'disable_courseware_js': True,
            'display_steps': display_steps,
            'is_active': json.dumps(request.user.is_active),
            'message_key': message,
            'platform_name': settings.PLATFORM_NAME,
            'processors': processors,
            'requirements': requirements,
            'user_full_name': full_name,
            'verification_deadline': (
                get_default_time_display(verification_deadline)
                if verification_deadline else ""
            ),
            'already_verified': already_verified,
            'verification_good_until': verification_good_until,
            'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
            'nav_hidden': True,
        }
        return render_to_response("verify_student/pay_and_verify.html", context)
    def _redirect_if_necessary(
        self,
        message,
        already_verified,
        already_paid,
        is_enrolled,
        course_key
    ):
        """Redirect the user to a more appropriate page if necessary.
        In some cases, a user may visit this page with
        verification / enrollment / payment state that
        we don't anticipate.  For example, a user may unenroll
        from the course after paying for it, then visit the
        "verify now" page to complete verification.
        When this happens, we try to redirect the user to
        the most appropriate page.
        Arguments:
            message (string): The messaging of the page.  Should be a key
                in `MESSAGES`.
            already_verified (bool): Whether the user has submitted
                a verification request recently.
            already_paid (bool): Whether the user is enrolled in a paid
                course mode.
            is_enrolled (bool): Whether the user has an active enrollment
                in the course.
            course_key (CourseKey): The key for the course.
        Returns:
            HttpResponse or None (None means no redirect is needed).
        """
        url = None
        course_kwargs = {'course_id': unicode(course_key)}
        if already_verified and already_paid:
            # If they've already paid and verified, there's nothing else to do,
            # so redirect them to the dashboard.
            if message != self.PAYMENT_CONFIRMATION_MSG:
                url = reverse('dashboard')
        elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG, self.PAYMENT_CONFIRMATION_MSG]:
            if is_enrolled:
                # If the user is already enrolled but hasn't yet paid,
                # then the "upgrade" messaging is more appropriate.
                if not already_paid:
                    url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs)
            else:
                # If the user is NOT enrolled, then send him/her
                # to the first time verification page.
                url = reverse('verify_student_start_flow', kwargs=course_kwargs)
        elif message == self.UPGRADE_MSG:
            if is_enrolled:
                if already_paid:
                    # If the student has paid, but not verified, redirect to the verification flow.
                    url = reverse('verify_student_verify_now', kwargs=course_kwargs)
            else:
                url = reverse('verify_student_start_flow', kwargs=course_kwargs)
        # Redirect if necessary, otherwise implicitly return None
        if url is not None:
            return redirect(url)
    def _get_paid_mode(self, course_key):
        """
        Retrieve the paid course mode for a course.
        The returned course mode may or may not be expired.
        Unexpired modes are preferred to expired modes.
        Arguments:
            course_key (CourseKey): The location of the course.
        Returns:
            CourseMode tuple, or None if the course has no paid mode.
        """
        # Retrieve all the modes at once to reduce the number of database queries
        all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
        # Retrieve the first mode that matches the following criteria:
        #  * Unexpired
        #  * Price > 0
        #  * Not credit
        for mode in unexpired_modes[course_key]:
            if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
                return mode
        # Otherwise, find the first expired mode
        for mode in all_modes[course_key]:
            if mode.min_price > 0:
                return mode
        # Otherwise, return None and so the view knows to respond with a 404.
        return None
    def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode):
        """Determine which steps to display to the user.
        Includes all steps by default, but removes steps
        if the user has already completed them.
        Arguments:
            always_show_payment (bool): If True, display the payment steps
                even if the user has already paid.
            already_verified (bool): Whether the user has submitted
                a verification request recently.
            already_paid (bool): Whether the user is enrolled in a paid
                course mode.
            course_mode (CourseMode): The mode being purchased; verification
                steps are dropped for non-verified modes.
        Returns:
            list of dicts with 'name' and 'title' keys, in display order.
        """
        display_steps = self.ALL_STEPS
        remove_steps = set()
        if already_verified or not CourseMode.is_verified_mode(course_mode):
            remove_steps |= set(self.VERIFICATION_STEPS)
        if already_paid and not always_show_payment:
            remove_steps |= set(self.PAYMENT_STEPS)
        else:
            # The "make payment" step doubles as an intro step,
            # so if we're showing the payment step, hide the intro step.
            remove_steps |= set([self.INTRO_STEP])
        return [
            {
                'name': step,
                'title': unicode(self.STEP_TITLES[step]),
            }
            for step in display_steps
            if step not in remove_steps
        ]
    def _requirements(self, display_steps, is_active):
        """Determine which requirements to show the user.
        For example, if the user needs to submit a photo
        verification, tell the user that she will need
        a photo ID and a webcam.
        Arguments:
            display_steps (list): The steps to display to the user.
            is_active (bool): If False, adds a requirement to activate the user account.
        Returns:
            dict: Keys are requirement names, values are booleans
                indicating whether to show the requirement.
        """
        all_requirements = {
            self.ACCOUNT_ACTIVATION_REQ: not is_active,
            self.PHOTO_ID_REQ: False,
            self.WEBCAM_REQ: False,
        }
        display_steps = set(step['name'] for step in display_steps)
        for step, step_requirements in self.STEP_REQUIREMENTS.iteritems():
            if step in display_steps:
                for requirement in step_requirements:
                    all_requirements[requirement] = True
        return all_requirements
    def _verification_valid_until(self, user, date_format="%m/%d/%Y"):
        """
        Check whether the user has a valid or pending verification.
        Arguments:
            user (User): The user whose verifications are checked.
            date_format (str): optional parameter for formatting datetime
                object to string in response
        Returns:
            str: expiration datetime of the latest valid-or-pending photo
                verification, formatted with `date_format`, or the empty
                string if the user has none.
        """
        photo_verifications = SoftwareSecurePhotoVerification.verification_valid_or_pending(user)
        # return 'expiration_datetime' of latest photo verification if found,
        # otherwise implicitly return ''
        if photo_verifications:
            return photo_verifications[0].expiration_datetime.strftime(date_format)
        return ''
    def _check_already_verified(self, user):
        """Check whether the user has a valid or pending verification.
        Note that this includes cases in which the user's verification
        has not been accepted (either because it hasn't been processed,
        or there was an error).
        This should return True if the user has done their part:
        submitted photos within the expiration period.
        """
        return SoftwareSecurePhotoVerification.user_has_valid_or_pending(user)
    def _check_enrollment(self, user, course_key):
        """Check whether the user has an active enrollment and has paid.
        If a user is enrolled in a paid course mode, we assume
        that the user has paid.
        Arguments:
            user (User): The user to check.
            course_key (CourseKey): The key of the course to check.
        Returns:
            Tuple `(has_paid, is_active)` indicating whether the user
            has paid and whether the user has an active account.
        """
        enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
        has_paid = False
        if enrollment_mode is not None and is_active:
            # Expired modes are included so that a past purchase still counts.
            all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
            course_mode = all_modes.get(enrollment_mode)
            has_paid = (course_mode and course_mode.min_price > 0)
        return (has_paid, bool(is_active))
    def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime):
        """
        Respond with some error messaging if the deadline has passed.
        Arguments:
            course (Course): The course the user is trying to enroll in.
            deadline_name (str): One of the deadline constants.
            deadline_datetime (datetime): The deadline.
        Returns: HttpResponse, or None when the deadline has not passed
            (or the deadline name is unrecognized).
        """
        if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]:
            log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name)
            return None
        deadline_passed = (
            deadline_datetime is not None and
            deadline_datetime < datetime.datetime.now(UTC)
        )
        if deadline_passed:
            context = {
                'course': course,
                'deadline_name': deadline_name,
                'deadline': (
                    get_default_time_display(deadline_datetime)
                    if deadline_datetime else ""
                )
            }
            return render_to_response("verify_student/missed_deadline.html", context)
def checkout_with_ecommerce_service(user, course_key, course_mode, processor):  # pylint: disable=invalid-name
    """Create a new basket and trigger immediate checkout, using the E-Commerce API.

    Arguments:
        user (User): the purchasing user, used for API auth and auditing.
        course_key (CourseKey): the course being purchased.
        course_mode (CourseMode): the mode whose SKU goes in the basket.
        processor (str): name of the payment processor to use.

    Returns:
        The 'payment_data' portion of the E-Commerce API response.

    Raises:
        SlumberBaseException: if the E-Commerce API call fails.
    """
    course_id = unicode(course_key)
    try:
        # Create the basket and request checkout in a single API call.
        basket_payload = {
            'products': [{'sku': course_mode.sku}],
            'checkout': True,
            'payment_processor_name': processor
        }
        api_response = ecommerce_api_client(user).baskets.post(basket_payload)
        # Hand the processor-specific payment parameters straight back.
        return api_response.get('payment_data')
    except SlumberBaseException:
        log.exception(
            'Failed to create order for %(username)s %(mode)s mode of %(course_id)s',
            {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id}
        )
        raise
    finally:
        # Record the checkout attempt whether or not it succeeded.
        audit_log(
            'checkout_requested',
            course_id=course_id,
            mode=course_mode.slug,
            processor_name=processor,
            user_id=user.id
        )
def checkout_with_shoppingcart(request, user, course_key, course_mode, amount):
    """Create an order and trigger checkout using the legacy shoppingcart.

    Returns a dict of payment parameters (processor name, payment page URL,
    and the signed form data) for the client to POST to the payment page.
    """
    cart = Order.get_cart_for_user(user)
    cart.clear()
    CertificateItem.add_to_order(cart, course_key, amount, course_mode.slug)
    # Change the order's status so that we don't accidentally modify it later.
    # We need to do this to ensure that the parameters we send to the payment system
    # match what we store in the database.
    # (Ordinarily we would do this client-side when the user submits the form, but since
    # the JavaScript on this page does that immediately, we make the change here instead.
    # This avoids a second AJAX call and some additional complication of the JavaScript.)
    # If a user later re-enters the verification / payment flow, she will create a new order.
    cart.start_purchase()
    return {
        'payment_processor_name': settings.CC_PROCESSOR_NAME,
        'payment_page_url': get_purchase_endpoint(),
        'payment_form_data': get_signed_purchase_params(
            cart,
            callback_url=request.build_absolute_uri(
                reverse("shoppingcart.views.postpay_callback")
            ),
            extra_data=[unicode(course_key), course_mode.slug]
        ),
    }
@require_POST
@login_required
def create_order(request):
    """
    This endpoint is named 'create_order' for backward compatibility, but its
    actual use is to add a single product to the user's cart and request
    immediate checkout.

    POST parameters: course_id (required), contribution, sku, processor.
    Returns a JSON payment-data payload, or 400 on bad input.
    """
    course_id = CourseKey.from_string(request.POST['course_id'])
    # Fall back to any contribution amount remembered in the session, then 0.
    session_donations = request.session.get('donation_for_course', {})
    contribution = request.POST.get(
        "contribution", session_donations.get(unicode(course_id), 0)
    )
    try:
        amount = decimal.Decimal(contribution).quantize(
            decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN
        )
    except decimal.InvalidOperation:
        return HttpResponseBadRequest(_("Selected price is not valid number."))

    # Prefer the mode identified by an explicit SKU, if one was provided.
    current_mode = None
    sku = request.POST.get('sku', None)
    if sku:
        try:
            current_mode = CourseMode.objects.get(sku=sku)
        except CourseMode.DoesNotExist:
            log.exception(u'Failed to find CourseMode with SKU [%s].', sku)

    if current_mode is None:
        # Check if there are more than 1 paid(mode with min_price>0 e.g verified/professional/no-id-professional) modes
        # for course exist then choose the first one
        paid_modes = CourseMode.paid_modes_for_course(course_id)
        if paid_modes:
            if len(paid_modes) > 1:
                log.warn(u"Multiple paid course modes found for course '%s' for create order request", course_id)
            current_mode = paid_modes[0]

    # Make sure this course has a paid mode
    if current_mode is None:
        log.warn(u"Create order requested for course '%s' without a paid mode.", course_id)
        return HttpResponseBadRequest(_("This course doesn't support paid certificates"))

    # Professional modes have a fixed price; ignore any user-selected amount.
    if CourseMode.is_professional_mode(current_mode):
        amount = current_mode.min_price
    if amount < current_mode.min_price:
        return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))

    if current_mode.sku:
        # if request.POST doesn't contain 'processor' then the service's default payment processor will be used.
        payment_data = checkout_with_ecommerce_service(
            request.user,
            course_id,
            current_mode,
            request.POST.get('processor')
        )
    else:
        payment_data = checkout_with_shoppingcart(
            request, request.user, course_id, current_mode, amount
        )

    if 'processor' not in request.POST:
        # (XCOM-214) To be removed after release.
        # the absence of this key in the POST payload indicates that the request was initiated from
        # a stale js client, which expects a response containing only the 'payment_form_data' part of
        # the payment data result.
        payment_data = payment_data['payment_form_data']
    return HttpResponse(json.dumps(payment_data), content_type="application/json")
class SubmitPhotosView(View):
"""
End-point for submitting photos for verification.
"""
@method_decorator(login_required)
def post(self, request):
"""
Submit photos for verification.
This end-point is used for the following cases:
* Initial verification through the pay-and-verify flow.
* Initial verification initiated from a checkpoint within a course.
* Re-verification initiated from a checkpoint within a course.
POST Parameters:
face_image (str): base64-encoded image data of the user's face.
photo_id_image (str): base64-encoded image data of the user's photo ID.
full_name (str): The user's full name, if the user is requesting a name change as well.
course_key (str): Identifier for the course, if initiated from a checkpoint.
checkpoint (str): Location of the checkpoint in the course.
"""
# If the user already has an initial verification attempt, we can re-use the photo ID
# the user submitted with the initial attempt. This is useful for the in-course reverification
# case in which users submit only the face photo and have it matched against their ID photos
# submitted with the initial verification.
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user)
# Validate the POST parameters
params, response = self._validate_parameters(request, bool(initial_verification))
if response is not None:
return response
# If necessary, update the user's full name
if "full_name" in params:
response = self._update_full_name(request.user, params["full_name"])
if response is not None:
return response
# Retrieve the image data
# Validation ensures that we'll have a face image, but we may not have
# a photo ID image if this is a reverification.
face_image, photo_id_image, response = self._decode_image_data(
params["face_image"], params.get("photo_id_image")
)
if response is not None:
return response
# Submit the attempt
attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification)
# If this attempt was submitted at a checkpoint, then associate
# the attempt with the checkpoint.
submitted_at_checkpoint = "checkpoint" in params and "course_key" in params
if submitted_at_checkpoint:
checkpoint = self._associate_attempt_with_checkpoint(
request.user, attempt,
params["course_key"],
params["checkpoint"]
)
# If the submission came from an in-course checkpoint
if initial_verification is not None and submitted_at_checkpoint:
self._fire_event(request.user, "edx.bi.reverify.submitted", {
"category": "verification",
"label": unicode(params["course_key"]),
"checkpoint": checkpoint.checkpoint_name,
})
# Send a URL that the client can redirect to in order
# to return to the checkpoint in the courseware.
redirect_url = get_redirect_url(params["course_key"], params["checkpoint"])
return JsonResponse({"url": redirect_url})
# Otherwise, the submission came from an initial verification flow.
else:
self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"})
self._send_confirmation_email(request.user)
redirect_url = None
return JsonResponse({})
def _validate_parameters(self, request, has_initial_verification):
"""
Check that the POST parameters are valid.
Arguments:
request (HttpRequest): The request object.
has_initial_verification (bool): Whether the user has an initial verification attempt.
Returns:
HttpResponse or None
"""
# Pull out the parameters we care about.
params = {
param_name: request.POST[param_name]
for param_name in [
"face_image",
"photo_id_image",
"course_key",
"checkpoint",
"full_name"
]
if param_name in request.POST
}
# If the user already has an initial verification attempt, then we don't
# require the user to submit a photo ID image, since we can re-use the photo ID
# image from the initial attempt.
# If we don't have an initial verification OR a photo ID image, something has gone
# terribly wrong in the JavaScript. Log this as an error so we can track it down.
if "photo_id_image" not in params and not has_initial_verification:
log.error(
(
"User %s does not have an initial verification attempt "
"and no photo ID image data was provided. "
"This most likely means that the JavaScript client is not "
"correctly constructing the request to submit photos."
), request.user.id
)
return None, HttpResponseBadRequest(
_("Photo ID image is required if the user does not have an initial verification attempt.")
)
# The face image is always required.
if "face_image" not in params:
msg = _("Missing required parameter face_image")
return None, HttpResponseBadRequest(msg)
# If provided, parse the course key and checkpoint location
if "course_key" in params:
try:
params["course_key"] = CourseKey.from_string(params["course_key"])
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid course key"))
if "checkpoint" in params:
try:
params["checkpoint"] = UsageKey.from_string(params["checkpoint"]).replace(
course_key=params["course_key"]
)
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid checkpoint location"))
return params, None
def _update_full_name(self, user, full_name):
    """
    Update the user's full name.

    Arguments:
        user (User): The user to update.
        full_name (unicode): The user's updated full name.

    Returns:
        HttpResponse or None: An error response, or None on success.
    """
    try:
        update_account_settings(user, {"name": full_name})
    except UserNotFound:
        return HttpResponseBadRequest(_("No profile found for user"))
    except AccountValidationError:
        # The account API rejected the name (too short); surface a 400
        # with a translated, length-aware message.
        error_text = _(
            "Name must be at least {min_length} characters long."
        ).format(min_length=NAME_MIN_LENGTH)
        return HttpResponseBadRequest(error_text)
def _decode_image_data(self, face_data, photo_id_data=None):
    """
    Decode image data sent with the request.

    Arguments:
        face_data (str): base64-encoded face image data.

    Keyword Arguments:
        photo_id_data (str): base64-encoded photo ID image data.

    Returns:
        tuple of (str, str, HttpResponse): decoded face image, decoded
        photo ID image (or None), and an error response (or None).
    """
    try:
        # The face image is present for both initial and re-verification.
        decoded_face = decode_image_data(face_data)

        # Only decode the photo ID image when the caller supplied one.
        decoded_photo_id = None
        if photo_id_data is not None:
            decoded_photo_id = decode_image_data(photo_id_data)
    except InvalidImageData:
        return None, None, HttpResponseBadRequest(_("Image data is not valid."))

    return decoded_face, decoded_photo_id, None
def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None):
    """
    Submit a verification attempt.

    Arguments:
        user (User): The user making the attempt.
        face_image (str): Decoded face image data.

    Keyword Arguments:
        photo_id_image (str or None): Decoded photo ID image data.
        initial_verification (SoftwareSecurePhotoVerification): The initial
            verification attempt whose ID photo may be re-used.

    Returns:
        SoftwareSecurePhotoVerification: The submitted attempt.
    """
    verification_attempt = SoftwareSecurePhotoVerification(user=user)

    # Face image data is always available, so upload it unconditionally.
    verification_attempt.upload_face_image(face_image)

    # When no new ID photo was submitted, the ID photo from the initial
    # attempt is re-used (via copy_id_photo_from below).  Earlier validation
    # guarantees that at least one of the two is available.
    if photo_id_image is not None:
        verification_attempt.upload_photo_id_image(photo_id_image)
    elif initial_verification is None:
        # Earlier validation should ensure that we never get here.
        log.error(
            "Neither a photo ID image or initial verification attempt provided. "
            "Parameter validation in the view should prevent this from happening!"
        )

    # Mark the attempt ready and send it off.
    verification_attempt.mark_ready()
    verification_attempt.submit(copy_id_photo_from=initial_verification)

    return verification_attempt
def _associate_attempt_with_checkpoint(self, user, attempt, course_key, usage_id):
    """
    Associate the verification attempt with a checkpoint within a course.

    Arguments:
        user (User): The user making the attempt.
        attempt (SoftwareSecurePhotoVerification): The verification attempt.
        course_key (CourseKey): The identifier for the course.
        usage_id (str): The location of the checkpoint within the course.

    Returns:
        VerificationCheckpoint: The (possibly newly created) checkpoint.
    """
    checkpoint_record = VerificationCheckpoint.get_or_create_verification_checkpoint(course_key, usage_id)
    checkpoint_record.add_verification_attempt(attempt)

    # Record that the user has submitted photos for this checkpoint.
    VerificationStatus.add_verification_status(checkpoint_record, user, "submitted")

    return checkpoint_record
def _send_confirmation_email(self, user):
    """
    Send an email confirming that the user submitted photos
    for initial verification.

    Failure to send is logged but never propagated: rolling back the
    surrounding transaction would be far worse than skipping the email.

    Arguments:
        user (User): The user who submitted photos.
    """
    context = {
        'full_name': user.profile.name,
        'platform_name': microsite.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
    }

    subject = _("Verification photos received")
    message = render_to_string('emails/photo_submission_confirmation.txt', context)
    from_address = microsite.get_value('default_from_email', settings.DEFAULT_FROM_EMAIL)
    to_address = user.email

    try:
        send_mail(subject, message, from_address, [to_address], fail_silently=False)
    except Exception:  # pylint: disable=broad-except
        # Catch all *ordinary* exceptions and log them.  The original bare
        # `except:` also swallowed SystemExit/KeyboardInterrupt, which should
        # never be suppressed; `except Exception` keeps the best-effort
        # behavior without that hazard.
        log.exception("Could not send notification email for initial verification for user %s", user.id)
def _fire_event(self, user, event_name, parameters):
    """
    Fire an analytics event.

    Arguments:
        user (User): The user who submitted photos.
        event_name (str): Name of the analytics event.
        parameters (dict): Event parameters.

    Returns: None
    """
    # Only emit when a Segment write key is configured.
    if not settings.LMS_SEGMENT_KEY:
        return

    tracking_context = tracker.get_tracker().resolve_context()
    segment_context = {
        'ip': tracking_context.get('ip'),
        'Google Analytics': {
            'clientId': tracking_context.get('client_id')
        }
    }
    analytics.track(user.id, event_name, parameters, context=segment_context)
def _compose_message_reverification_email(
        course_key, user_id, related_assessment_location, status, request
):  # pylint: disable=invalid-name
    """
    Compose subject and message for photo reverification email.

    Args:
        course_key (CourseKey): CourseKey object
        user_id (str): User Id
        related_assessment_location (str): Location of reverification XBlock
        status (str): Approval status
        request (HttpRequest): The request, used to build absolute URIs

    Returns:
        None if any error occurred else Tuple of subject and message strings
    """
    try:
        usage_key = UsageKey.from_string(related_assessment_location)
        reverification_block = modulestore().get_item(usage_key)

        course = modulestore().get_course(course_key)
        redirect_url = get_redirect_url(course_key, usage_key.replace(course_key=course_key))

        subject = "Re-verification Status"
        context = {
            "status": status,
            "course_name": course.display_name_with_default,
            "assessment": reverification_block.related_assessment
        }

        # Allowed attempts is 1 if not set on verification block
        allowed_attempts = reverification_block.attempts + 1
        used_attempts = VerificationStatus.get_user_attempts(user_id, course_key, related_assessment_location)
        left_attempts = allowed_attempts - used_attempts
        is_attempt_allowed = left_attempts > 0

        # The checkpoint stays open until its due date, when one is set.
        verification_open = True
        if reverification_block.due:
            verification_open = timezone.now() <= reverification_block.due

        context["left_attempts"] = left_attempts
        context["is_attempt_allowed"] = is_attempt_allowed
        context["verification_open"] = verification_open
        context["due_date"] = get_default_time_display(reverification_block.due)
        context['platform_name'] = settings.PLATFORM_NAME
        context["used_attempts"] = used_attempts
        context["allowed_attempts"] = allowed_attempts
        context["support_link"] = microsite.get_value('email_from_address', settings.CONTACT_EMAIL)

        re_verification_link = reverse(
            'verify_student_incourse_reverify',
            args=(
                unicode(course_key),
                related_assessment_location
            )
        )
        context["course_link"] = request.build_absolute_uri(redirect_url)
        context["reverify_link"] = request.build_absolute_uri(re_verification_link)

        message = render_to_string('emails/reverification_processed.txt', context)
        log.info(
            "Sending email to User_Id=%s. Attempts left for this user are %s. "
            "Allowed attempts %s. "
            "Due Date %s",
            str(user_id), left_attempts, allowed_attempts, str(reverification_block.due)
        )
        return subject, message
    # Catch all ordinary exceptions to avoid raising back to the view.
    # The original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; `except Exception` keeps the best-effort contract
    # (return None on failure) without suppressing interpreter exits.
    except Exception:  # pylint: disable=broad-except
        log.exception("The email for re-verification sending failed for user_id %s", user_id)
        return None
def _send_email(user_id, subject, message):
    """Send email to given user.

    Args:
        user_id (str): User Id
        subject (str): Subject line of the email
        message (str): Email message body

    Returns:
        None
    """
    # Prefer the microsite's from-address, falling back to the platform default.
    sender_address = microsite.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )
    recipient = User.objects.get(id=user_id)
    recipient.email_user(subject, message, sender_address)
def _set_user_requirement_status(attempt, namespace, status, reason=None):
    """Set the status of a credit requirement for the user,
    based on a verification checkpoint.

    Lookup or update failures are logged and swallowed: credit bookkeeping
    must not break the verification callback.
    """
    # Find the checkpoint associated with this verification attempt.
    try:
        checkpoint = VerificationCheckpoint.objects.get(photo_verification=attempt)
    except VerificationCheckpoint.DoesNotExist:
        log.error("Unable to find checkpoint for user with id %d", attempt.user.id)
        return

    try:
        set_credit_requirement_status(
            attempt.user.username,
            checkpoint.course_id,
            namespace,
            checkpoint.checkpoint_location,
            status=status,
            reason=reason,
        )
    except Exception:  # pylint: disable=broad-except
        # Catch exception if unable to add credit requirement
        # status for user
        log.error("Unable to add Credit requirement status for user with id %d", attempt.user.id)
@require_POST
@csrf_exempt  # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
    """
    Software Secure will call this callback to tell us whether a user is
    verified to be who they said they are.

    Expects a JSON body with "EdX-ID" (the attempt's receipt id), "Result"
    ("PASS", "FAIL", or "SYSTEM FAIL"), and optional "Reason"/"MessageType"
    fields.  Returns 200 "OK!" on success, 400 for malformed or
    unrecognized payloads.
    """
    body = request.body

    try:
        body_dict = json.loads(body)
    except ValueError:
        log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
        return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))

    if not isinstance(body_dict, dict):
        log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
        return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))

    headers = {
        "Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
        "Date": request.META.get("HTTP_DATE", "")
    }

    # NOTE(review): the result of the signature check is deliberately
    # discarded (see the commented-out block below); only the access key
    # portion of the Authorization header is actually verified.
    has_valid_signature(
        "POST",
        headers,
        body_dict,
        settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
        settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
    )

    # NOTE(review): a malformed Authorization header (no space) makes this
    # split raise an uncaught ValueError -> HTTP 500 rather than a 400.
    _response, access_key_and_sig = headers["Authorization"].split(" ")
    access_key = access_key_and_sig.split(":")[0]

    # This is what we should be doing...
    #if not sig_valid:
    #    return HttpResponseBadRequest("Signature is invalid")

    # This is what we're doing until we can figure out why we disagree on sigs
    if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
        return HttpResponseBadRequest("Access key invalid")

    receipt_id = body_dict.get("EdX-ID")
    result = body_dict.get("Result")
    reason = body_dict.get("Reason", "")
    error_code = body_dict.get("MessageType", "")

    try:
        attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
    except SoftwareSecurePhotoVerification.DoesNotExist:
        log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id)
        return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))

    # Map the vendor result onto the attempt's state and the matching
    # credit-requirement status.
    if result == "PASS":
        log.debug("Approving verification for %s", receipt_id)
        attempt.approve()
        status = "approved"
        _set_user_requirement_status(attempt, 'reverification', 'satisfied')
    elif result == "FAIL":
        log.debug("Denying verification for %s", receipt_id)
        attempt.deny(json.dumps(reason), error_code=error_code)
        status = "denied"
        _set_user_requirement_status(
            attempt, 'reverification', 'failed', json.dumps(reason)
        )
    elif result == "SYSTEM FAIL":
        log.debug("System failure for %s -- resetting to must_retry", receipt_id)
        attempt.system_error(json.dumps(reason), error_code=error_code)
        status = "error"
        log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
    else:
        log.error("Software Secure returned unknown result %s", result)
        return HttpResponseBadRequest(
            "Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
        )

    # Record the new status against every checkpoint tied to this attempt.
    checkpoints = VerificationCheckpoint.objects.filter(photo_verification=attempt).all()
    VerificationStatus.add_status_from_checkpoints(checkpoints=checkpoints, user=attempt.user, status=status)

    # Trigger ICRV email only if ICRV status emails config is enabled
    icrv_status_emails = IcrvStatusEmailsConfiguration.current()
    if icrv_status_emails.enabled and checkpoints:
        user_id = attempt.user.id
        course_key = checkpoints[0].course_id
        related_assessment_location = checkpoints[0].checkpoint_location

        # NOTE(review): _compose_message_reverification_email returns None on
        # internal failure, which would make this unpacking raise TypeError
        # — confirm whether that path can occur in practice.
        subject, message = _compose_message_reverification_email(
            course_key, user_id, related_assessment_location, status, request
        )

        _send_email(user_id, subject, message)
    return HttpResponse("OK!")
class ReverifyView(View):
    """
    Reverification occurs when a user's initial verification is denied
    or expires.  When this happens, users can re-submit photos through
    the re-verification flow.

    Unlike in-course reverification, this flow requires users to submit
    *both* face and ID photos.  In contrast, during in-course reverification,
    students submit only face photos, which are matched against the ID photo
    the user submitted during initial verification.
    """

    @method_decorator(login_required)
    def get(self, request):
        """
        Render the reverification flow.

        Most of the work is done client-side by composing the same
        Backbone views used in the initial verification flow.
        """
        status, _ = SoftwareSecurePhotoVerification.user_status(request.user)

        # An ongoing verification ('submitted' or 'must_retry') is reported
        # as 'pending'; only these statuses may enter the reverify flow.
        can_reverify = status in ["must_reverify", "expired", "pending"]

        if not can_reverify:
            return render_to_response(
                "verify_student/reverify_not_allowed.html",
                {"status": status}
            )

        context = {
            "user_full_name": request.user.profile.name,
            "platform_name": settings.PLATFORM_NAME,
            "capture_sound": staticfiles_storage.url("audio/camera_capture.wav"),
        }
        return render_to_response("verify_student/reverify.html", context)
class InCourseReverifyView(View):
    """
    The in-course reverification view.

    In-course reverification occurs while a student is taking a course.
    At points in the course, students are prompted to submit face photos,
    which are matched against the ID photos the user submitted during their
    initial verification.

    Students are prompted to enter this flow from an "In Course Reverification"
    XBlock (courseware component) that course authors add to the course.
    See https://github.com/edx/edx-reverification-block for more details.
    """

    @method_decorator(login_required)
    def get(self, request, course_id, usage_id):
        """Display the view for face photo submission.

        Args:
            request(HttpRequest): HttpRequest object
            course_id(str): A string of course id
            usage_id(str): Location of Reverification XBlock in courseware

        Returns:
            HttpResponse

        Raises:
            Http404: If the course or the verification checkpoint does not exist.
        """
        user = request.user
        course_key = CourseKey.from_string(course_id)
        course = modulestore().get_course(course_key)
        if course is None:
            log.error(u"Could not find course '%s' for in-course reverification.", course_key)
            raise Http404

        try:
            checkpoint = VerificationCheckpoint.objects.get(course_id=course_key, checkpoint_location=usage_id)
        except VerificationCheckpoint.DoesNotExist:
            log.error(
                u"No verification checkpoint exists for the "
                u"course '%s' and checkpoint location '%s'.",
                course_key, usage_id
            )
            raise Http404

        # A user without an initial verification cannot re-verify; send them
        # to the initial flow with this checkpoint attached instead.
        initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(user)
        if not initial_verification:
            return self._redirect_to_initial_verification(user, course_key, usage_id)

        # emit the reverification event
        self._track_reverification_events('edx.bi.reverify.started', user.id, course_id, checkpoint.checkpoint_name)

        context = {
            'course_key': unicode(course_key),
            'course_name': course.display_name_with_default,
            'checkpoint_name': checkpoint.checkpoint_name,
            'platform_name': settings.PLATFORM_NAME,
            'usage_id': usage_id,
            'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
        }
        return render_to_response("verify_student/incourse_reverify.html", context)

    def _track_reverification_events(self, event_name, user_id, course_id, checkpoint):  # pylint: disable=invalid-name
        """Track re-verification events for a user against a reverification
        checkpoint of a course.

        Events are always logged; they are sent to Segment only when an
        LMS_SEGMENT_KEY is configured.

        Arguments:
            event_name (str): Name of event being tracked
            user_id (str): The ID of the user
            course_id (unicode): ID associated with the course
            checkpoint (str): Checkpoint name

        Returns:
            None
        """
        log.info(
            u"In-course reverification: event %s occurred for user '%s' in course '%s' at checkpoint '%s'",
            event_name, user_id, course_id, checkpoint
        )

        if settings.LMS_SEGMENT_KEY:
            tracking_context = tracker.get_tracker().resolve_context()
            analytics.track(
                user_id,
                event_name,
                {
                    'category': "verification",
                    'label': unicode(course_id),
                    'checkpoint': checkpoint
                },
                context={
                    'ip': tracking_context.get('ip'),
                    'Google Analytics': {
                        'clientId': tracking_context.get('client_id')
                    }
                }
            )

    def _redirect_to_initial_verification(self, user, course_key, checkpoint):
        """
        Redirect because the user does not have an initial verification.

        We will redirect the user to the initial verification flow,
        passing the identifier for this checkpoint.  When the user
        submits a verification attempt, it will count for *both*
        the initial and checkpoint verification.

        Arguments:
            user (User): The user who made the request.
            course_key (CourseKey): The identifier for the course for which
                the user is attempting to re-verify.
            checkpoint (string): Location of the checkpoint in the courseware.

        Returns:
            HttpResponse: A redirect to the "verify now" flow with the
            checkpoint passed as a query parameter.
        """
        log.info(
            u"User %s does not have an initial verification, so "
            u"he/she will be redirected to the \"verify later\" flow "
            u"for the course %s.",
            user.id, course_key
        )
        base_url = reverse('verify_student_verify_now', kwargs={'course_id': unicode(course_key)})
        params = urllib.urlencode({"checkpoint": checkpoint})
        full_url = u"{base}?{params}".format(base=base_url, params=params)
        return redirect(full_url)
| ahmadiga/min_edx | lms/djangoapps/verify_student/views.py | Python | agpl-3.0 | 58,792 | [
"VisIt"
] | f02ab1a7bbbf1f3d626d59e0e6768a2cdd6342439025db37a5071b5b056418ca |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from flaky import flaky
from textwrap import dedent
from unittest import skip
from nose.plugins.attrib import attr
from bok_choy.promise import EmptyPromise
from bok_choy.web_app_test import WebAppTest
from ..helpers import (
UniqueCourseTest,
EventsTestMixin,
load_data_str,
generate_course_key,
select_option_by_value,
element_has_text
)
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.create_mode import ModeCreationPage
from ...pages.common.logout import LogoutPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.progress import ProgressPage
from ...pages.lms.dashboard import DashboardPage
from ...pages.lms.problem import ProblemPage
from ...pages.lms.video.video import VideoPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.studio.settings import SettingsPage
from ...pages.lms.login_and_register import CombinedLoginAndRegisterPage, ResetPasswordPage
from ...pages.lms.track_selection import TrackSelectionPage
from ...pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
from ...pages.lms.course_wiki import CourseWikiPage, CourseWikiEditPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
@attr('shard_1')
class ForgotPasswordPageTest(UniqueCourseTest):
    """
    Test that forgot password forms is rendered if url contains 'forgot-password-modal'
    in hash.
    """

    def setUp(self):
        """Create the page object under test."""
        super(ForgotPasswordPageTest, self).setUp()
        self.reset_password_page = ResetPasswordPage(self.browser)

    def test_reset_password_form_visibility(self):
        """The reset-password form is visible after visiting the page."""
        # Open the password reset page and confirm the form is shown.
        self.reset_password_page.visit()
        self.assertTrue(self.reset_password_page.is_form_visible())
@attr('shard_1')
class LoginFromCombinedPageTest(UniqueCourseTest):
    """Test that we can log in using the combined login/registration page.

    Also test that we can request a password reset from the combined
    login/registration page.
    """

    def setUp(self):
        """Initialize the page objects and create a test course. """
        super(LoginFromCombinedPageTest, self).setUp()
        # Start on the "login" side of the combined form, scoped to the
        # course so a successful login auto-enrolls the user.
        self.login_page = CombinedLoginAndRegisterPage(
            self.browser,
            start_page="login",
            course_id=self.course_id
        )
        self.dashboard_page = DashboardPage(self.browser)

        # Create a course to enroll in
        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

    def test_login_success(self):
        """Logging in with valid credentials lands on the dashboard, enrolled."""
        # Create a user account
        email, password = self._create_unique_user()

        # Navigate to the login page and try to log in
        self.login_page.visit().login(email=email, password=password)

        # Expect that we reach the dashboard and we're auto-enrolled in the course
        course_names = self.dashboard_page.wait_for_page().available_courses
        self.assertIn(self.course_info["display_name"], course_names)

    def test_login_failure(self):
        """Logging in with an unknown account displays an error."""
        # Navigate to the login page
        self.login_page.visit()

        # User account does not exist
        self.login_page.login(email="nobody@nowhere.com", password="password")

        # Verify that an error is displayed
        self.assertIn("Email or password is incorrect.", self.login_page.wait_for_errors())

    def test_toggle_to_register_form(self):
        """The toggle link switches the combined form to its register side."""
        self.login_page.visit().toggle_form()
        self.assertEqual(self.login_page.current_form, "register")

    @flaky  # TODO fix this, see ECOM-1165
    def test_password_reset_success(self):
        """Requesting a reset for an existing account shows a success message."""
        # Create a user account
        email, password = self._create_unique_user()  # pylint: disable=unused-variable

        # Navigate to the password reset form and try to submit it
        self.login_page.visit().password_reset(email=email)

        # Expect that we're shown a success message
        self.assertIn("PASSWORD RESET EMAIL SENT", self.login_page.wait_for_success())

    def test_password_reset_failure(self):
        """Requesting a reset for an unknown email shows a failure message."""
        # Navigate to the password reset form
        self.login_page.visit()

        # User account does not exist
        self.login_page.password_reset(email="nobody@nowhere.com")

        # Expect that we're shown a failure message
        self.assertIn(
            "No user with the provided email address exists.",
            self.login_page.wait_for_errors()
        )

    def _create_unique_user(self):
        """
        Create a new user with a unique name and email.

        Returns:
            tuple of (email, password) for the new, logged-out user.
        """
        username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        email = "{user}@example.com".format(user=username)
        password = "password"

        # Create the user (automatically logs us in)
        AutoAuthPage(
            self.browser,
            username=username,
            email=email,
            password=password
        ).visit()

        # Log out
        LogoutPage(self.browser).visit()

        return (email, password)
@attr('shard_1')
class RegisterFromCombinedPageTest(UniqueCourseTest):
    """Test that we can register a new user from the combined login/registration page. """

    def setUp(self):
        """Initialize the page objects and create a test course. """
        super(RegisterFromCombinedPageTest, self).setUp()
        # Start on the "register" side of the combined form, scoped to the
        # course so successful registration auto-enrolls the user.
        self.register_page = CombinedLoginAndRegisterPage(
            self.browser,
            start_page="register",
            course_id=self.course_id
        )
        self.dashboard_page = DashboardPage(self.browser)

        # Create a course to enroll in
        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

    def test_register_success(self):
        """A fully valid registration lands on the dashboard, enrolled."""
        # Navigate to the registration page
        self.register_page.visit()

        # Fill in the form and submit it
        username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        email = "{user}@example.com".format(user=username)
        self.register_page.register(
            email=email,
            password="password",
            username=username,
            full_name="Test User",
            country="US",
            terms_of_service=True
        )

        # Expect that we reach the dashboard and we're auto-enrolled in the course
        course_names = self.dashboard_page.wait_for_page().available_courses
        self.assertIn(self.course_info["display_name"], course_names)

        # The sidebar copy is checked case-insensitively (lower()).
        self.assertEqual("want to change your account settings?", self.dashboard_page.sidebar_menu_title.lower())
        self.assertEqual(
            "click the arrow next to your username above.",
            self.dashboard_page.sidebar_menu_description.lower()
        )

    def test_register_failure(self):
        """Missing required fields produce the expected validation errors."""
        # Navigate to the registration page
        self.register_page.visit()

        # Enter a blank for the username field, which is required
        # Don't agree to the terms of service / honor code.
        # Don't specify a country code, which is required.
        username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        email = "{user}@example.com".format(user=username)
        self.register_page.register(
            email=email,
            password="password",
            username="",
            full_name="Test User",
            terms_of_service=False
        )

        # Verify that the expected errors are displayed.
        errors = self.register_page.wait_for_errors()
        self.assertIn(u'Please enter your Public username.', errors)
        self.assertIn(u'You must agree to the edX Terms of Service and Honor Code.', errors)
        self.assertIn(u'Please select your Country.', errors)

    def test_toggle_to_login_form(self):
        """The toggle link switches the combined form to its login side."""
        self.register_page.visit().toggle_form()
        self.assertEqual(self.register_page.current_form, "login")
@attr('shard_1')
class PayAndVerifyTest(EventsTestMixin, UniqueCourseTest):
    """Test that we can proceed through the payment and verification flow."""

    def setUp(self):
        """Initialize the test.

        Create the necessary page objects, create a test course and configure its modes,
        create a user and log them in.
        """
        super(PayAndVerifyTest, self).setUp()

        self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
        self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
        self.immediate_verification_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='verify-now')
        self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
        self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
        self.dashboard_page = DashboardPage(self.browser)

        # Create a course
        CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        ).install()

        # Add an honor mode to the course
        ModeCreationPage(self.browser, self.course_id).visit()

        # Add a verified mode to the course
        ModeCreationPage(self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate', min_price=10, suggested_prices='10,20').visit()

    @skip("Flaky 02/02/2015")
    def test_immediate_verification_enrollment(self):
        """Enroll as verified, pay, and complete webcam verification right away."""
        # Create a user and log them in
        student_id = AutoAuthPage(self.browser).visit().get_user_id()

        # Navigate to the track selection page
        self.track_selection_page.visit()

        # Enter the payment and verification flow by choosing to enroll as verified
        self.track_selection_page.enroll('verified')

        # Proceed to the fake payment page
        self.payment_and_verification_flow.proceed_to_payment()

        # Submit payment
        self.fake_payment_page.submit_payment()

        # Expect enrollment activated event
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.activated",
            self.start_time,
            student_id,
            1
        )

        # Expect that one mode_changed enrollment event fired as part of the upgrade
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.mode_changed",
            self.start_time,
            student_id,
            1
        )

        # Proceed to verification
        self.payment_and_verification_flow.immediate_verification()

        # Take face photo and proceed to the ID photo step
        self.payment_and_verification_flow.webcam_capture()
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Take ID photo and proceed to the review photos step
        self.payment_and_verification_flow.webcam_capture()
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Submit photos and proceed to the enrollment confirmation step
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as verified in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'verified')

    def test_deferred_verification_enrollment(self):
        """Enroll as verified and pay, deferring webcam verification."""
        # Create a user and log them in
        student_id = AutoAuthPage(self.browser).visit().get_user_id()

        # Navigate to the track selection page
        self.track_selection_page.visit()

        # Enter the payment and verification flow by choosing to enroll as verified
        self.track_selection_page.enroll('verified')

        # Proceed to the fake payment page
        self.payment_and_verification_flow.proceed_to_payment()

        # Submit payment
        self.fake_payment_page.submit_payment()

        # Expect enrollment activated event
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.activated",
            self.start_time,
            student_id,
            1
        )

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as verified in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'verified')

    def test_enrollment_upgrade(self):
        """Upgrade an existing honor enrollment to verified via the dashboard."""
        # Create a user, log them in, and enroll them in the honor mode
        student_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as honor in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'honor')

        # Click the upsell button on the dashboard
        self.dashboard_page.upgrade_enrollment(self.course_info["display_name"], self.upgrade_page)

        # Select the first contribution option appearing on the page
        self.upgrade_page.indicate_contribution()

        # Proceed to the fake payment page
        self.upgrade_page.proceed_to_payment()

        # Submit payment
        self.fake_payment_page.submit_payment()

        # Expect that one mode_changed enrollment event fired as part of the upgrade
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.mode_changed",
            self.start_time,
            student_id,
            1
        )

        # Expect no enrollment activated event
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.activated",
            self.start_time,
            student_id,
            0
        )

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as verified in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'verified')
class CourseWikiTest(UniqueCourseTest):
    """
    Tests that verify the course wiki.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(CourseWikiTest, self).setUp()

        # self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
        self.course_info['number'] = self.unique_id[0:6]

        # Fix: the original assigned self.course_info_page twice; once is enough.
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.course_wiki_page = CourseWikiPage(self.browser, self.course_id)
        self.course_wiki_edit_page = CourseWikiEditPage(self.browser, self.course_id, self.course_info)
        self.tab_nav = TabNavPage(self.browser)

        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

        # Access course wiki page
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Wiki')

    def _open_editor(self):
        """Open the wiki article editor and wait for it to load."""
        self.course_wiki_page.open_editor()
        self.course_wiki_edit_page.wait_for_page()

    def test_edit_course_wiki(self):
        """
        Wiki page by default is editable for students.

        After accessing the course wiki,
        replace the content of the default page and
        confirm the new content has been saved.
        """
        content = "hello"
        self._open_editor()
        self.course_wiki_edit_page.replace_wiki_content(content)
        self.course_wiki_edit_page.save_wiki_content()
        actual_content = unicode(self.course_wiki_page.q(css='.wiki-article p').text[0])
        self.assertEqual(content, actual_content)
class HighLevelTabTest(UniqueCourseTest):
    """
    Tests that verify each of the high-level tabs available within a course.
    """

    def setUp(self):
        """
        Install a course fixture and build the page objects under test.
        """
        super(HighLevelTabTest, self).setUp()

        # self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
        self.course_info['number'] = self.unique_id[0:6]

        # Page objects exercised by the individual tests below.
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.progress_page = ProgressPage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        self.tab_nav = TabNavPage(self.browser)
        self.video = VideoPage(self.browser)

        # Build a course containing sections/problems, a static tab,
        # one course update, and one handout.
        fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        fixture.add_update(
            CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
        )

        fixture.add_handout('demoPDF.pdf')

        fixture.add_children(
            XBlockFixtureDesc('static_tab', 'Test Static Tab'),
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
                    XBlockFixtureDesc('html', 'Test HTML'),
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2'),
                XBlockFixtureDesc('sequential', 'Test Subsection 3'),
            )
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_course_info(self):
        """
        Navigate to the course info page.
        """
        # Reach the course info tab starting from the progress page.
        self.progress_page.visit()
        self.tab_nav.go_to_tab('Course Info')

        # The fixture installed exactly one update.
        self.assertEqual(self.course_info_page.num_updates, 1)

        # The fixture installed exactly one handout: the demo PDF.
        handouts = self.course_info_page.handout_links
        self.assertEqual(len(handouts), 1)
        self.assertIn('demoPDF.pdf', handouts[0])

    def test_progress(self):
        """
        Navigate to the progress page.
        """
        # Reach the progress tab starting from the course info page.
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Progress')

        # We haven't answered any problems yet, so assume scores are zero
        # Only problems should have scores; so there should be 2 scores.
        CHAPTER = 'Test Section'
        SECTION = 'Test Subsection'
        EXPECTED_SCORES = [(0, 3), (0, 1)]

        actual_scores = self.progress_page.scores(CHAPTER, SECTION)
        self.assertEqual(actual_scores, EXPECTED_SCORES)

    def test_static_tab(self):
        """
        Navigate to a static tab (course content)
        """
        # From the course info page, navigate to the static tab
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Test Static Tab')
        self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))

    def test_wiki_tab_first_time(self):
        """
        Navigate to the course wiki tab. When the wiki is accessed for
        the first time, it is created on the fly.
        """
        course_wiki = CourseWikiPage(self.browser, self.course_id)

        # From the course info page, navigate to the wiki tab
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Wiki')
        self.assertTrue(self.tab_nav.is_on_tab('Wiki'))

        # The auto-created article is named after the course key parts.
        expected_article_name = "{org}.{course_number}.{course_run}".format(
            org=self.course_info['org'],
            course_number=self.course_info['number'],
            course_run=self.course_info['run']
        )
        self.assertEqual(expected_article_name, course_wiki.article_name)

    def test_courseware_nav(self):
        """
        Navigate to a particular unit in the courseware.
        """
        # Reach the courseware tab starting from the course info page.
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')

        # Check that the courseware navigation appears correctly
        EXPECTED_SECTIONS = {
            'Test Section': ['Test Subsection'],
            'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']
        }

        actual_sections = self.course_nav.sections
        for section_name, expected_subsections in EXPECTED_SECTIONS.iteritems():
            self.assertIn(section_name, actual_sections)
            self.assertEqual(actual_sections[section_name], expected_subsections)

        # Navigate to a particular section
        self.course_nav.go_to_section('Test Section', 'Test Subsection')

        # Check the sequence items
        EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']

        actual_items = self.course_nav.sequence_items
        self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))
        for expected in EXPECTED_ITEMS:
            self.assertIn(expected, actual_items)
class PDFTextBooksTabTest(UniqueCourseTest):
    """
    Tests that verify each of the textbook tabs available within a course.
    """

    def setUp(self):
        """
        Install a course fixture with two PDF textbooks and set up pages.
        """
        super(PDFTextBooksTabTest, self).setUp()

        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)

        fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        # Add PDF textbooks to course fixture.
        for i in range(1, 3):
            fixture.add_textbook("PDF Book {}".format(i), [{"title": "Chapter Of Book {}".format(i), "url": ""}])

        fixture.install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    @flaky  # TODO: fix this, see TNL-2083
    def test_verify_textbook_tabs(self):
        """
        Test multiple pdf textbooks loads correctly in lms.
        """
        self.course_info_page.visit()

        # Visiting each textbook tab fails if that tab does not load.
        for i in range(1, 3):
            self.tab_nav.go_to_tab("PDF Book {}".format(i))
class VideoTest(UniqueCourseTest):
    """
    Navigate to a video in the courseware and play it.
    """

    def setUp(self):
        """
        Install a course fixture containing a single video component.
        """
        super(VideoTest, self).setUp()

        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        self.tab_nav = TabNavPage(self.browser)
        self.video = VideoPage(self.browser)

        fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('video', 'Video')
                    )))).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    @skip("BLD-563: Video Player Stuck on Pause")
    def test_video_player(self):
        """
        Play a video in the courseware.
        """
        # Navigate to a video
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')

        # The video should start off paused; since it hasn't loaded yet,
        # its elapsed time is 0.
        self.assertFalse(self.video.is_playing)
        self.assertEqual(self.video.elapsed_time, 0)

        # Play the video
        self.video.play()

        # Now we should be playing
        self.assertTrue(self.video.is_playing)

        # Commented the below EmptyPromise, will move to its page once this test is working and stable
        # Also there is should be no Promise check in any test as this should be done in Page Object
        # Wait for the video to load the duration
        # EmptyPromise(
        #    lambda: self.video.duration > 0,
        #    'video has duration', timeout=20
        # ).fulfill()

        # Pause the video
        self.video.pause()

        # Expect that the elapsed time and duration are reasonable
        # Again, we can't expect the video to actually play because of
        # latency through the ssh tunnel
        self.assertGreaterEqual(self.video.elapsed_time, 0)
        self.assertGreaterEqual(self.video.duration, self.video.elapsed_time)
class VisibleToStaffOnlyTest(UniqueCourseTest):
    """
    Tests that content with visible_to_staff_only set to True cannot be viewed by students.
    """

    def setUp(self):
        super(VisibleToStaffOnlyTest, self).setUp()

        fixture = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        # One section with: a subsection mixing locked/unlocked units, a
        # fully visible subsection, and a subsection locked at its own level.
        fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Subsection With Locked Unit').add_children(
                    XBlockFixtureDesc('vertical', 'Locked Unit', metadata={'visible_to_staff_only': True}).add_children(
                        XBlockFixtureDesc('html', 'Html Child in locked unit', data="<html>Visible only to staff</html>"),
                    ),
                    XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
                        XBlockFixtureDesc('html', 'Html Child in unlocked unit', data="<html>Visible only to all</html>"),
                    )
                ),
                XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('html', 'Html Child in visible unit', data="<html>Visible to all</html>"),
                    )
                ),
                XBlockFixtureDesc('sequential', 'Locked Subsection', metadata={'visible_to_staff_only': True}).add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc(
                            'html', 'Html Child in locked subsection', data="<html>Visible only to staff</html>"
                        )
                    )
                )
            )
        ).install()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)

    def test_visible_to_staff(self):
        """
        Scenario: All content is visible for a user marked is_staff (different from course staff)
            Given some of the course content has been marked 'visible_to_staff_only'
            And I am logged on with an account marked 'is_staff'
            Then I can see all course content
        """
        AutoAuthPage(self.browser, username="STAFF_TESTER", email="johndoe_staff@example.com",
                     course_id=self.course_id, staff=True).visit()

        self.courseware_page.visit()

        # Staff users see all three subsections, including locked content.
        self.assertEqual(3, len(self.course_nav.sections['Test Section']))

        self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
        self.assertEqual(["Html Child in locked unit", "Html Child in unlocked unit"], self.course_nav.sequence_items)

        self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
        self.assertEqual(["Html Child in visible unit"], self.course_nav.sequence_items)

        self.course_nav.go_to_section("Test Section", "Locked Subsection")
        self.assertEqual(["Html Child in locked subsection"], self.course_nav.sequence_items)

    def test_visible_to_student(self):
        """
        Scenario: Content marked 'visible_to_staff_only' is not visible for students in the course
            Given some of the course content has been marked 'visible_to_staff_only'
            And I am logged on with an authorized student account
            Then I can only see content without 'visible_to_staff_only' set to True
        """
        AutoAuthPage(self.browser, username="STUDENT_TESTER", email="johndoe_student@example.com",
                     course_id=self.course_id, staff=False).visit()

        self.courseware_page.visit()

        # Students see only the two subsections that are not locked.
        self.assertEqual(2, len(self.course_nav.sections['Test Section']))

        self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
        self.assertEqual(["Html Child in unlocked unit"], self.course_nav.sequence_items)

        self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
        self.assertEqual(["Html Child in visible unit"], self.course_nav.sequence_items)
class TooltipTest(UniqueCourseTest):
    """
    Tests that tooltips are displayed
    """

    def setUp(self):
        """
        Install a course fixture with several sequence items and set up pages.
        """
        super(TooltipTest, self).setUp()

        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)

        fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        fixture.add_children(
            XBlockFixtureDesc('static_tab', 'Test Static Tab'),
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
                    XBlockFixtureDesc('html', 'Test HTML'),
                )
            )
        ).install()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_tooltip(self):
        """
        Verify that tooltips are displayed when you hover over the sequence nav bar.
        """
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')

        self.assertTrue(self.courseware_page.tooltips_displayed())
class PreRequisiteCourseTest(UniqueCourseTest):
    """
    Tests that pre-requisite course messages are displayed
    """

    def setUp(self):
        """
        Install the main course plus a second course to act as pre-requisite.
        """
        super(PreRequisiteCourseTest, self).setUp()

        # The course under test.
        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

        # A second course that will be configured as the pre-requisite.
        self.prc_info = {
            'org': 'test_org',
            'number': self.unique_id,
            'run': 'prc_test_run',
            'display_name': 'PR Test Course' + self.unique_id
        }

        CourseFixture(
            self.prc_info['org'], self.prc_info['number'],
            self.prc_info['run'], self.prc_info['display_name']
        ).install()

        pre_requisite_course_key = generate_course_key(
            self.prc_info['org'],
            self.prc_info['number'],
            self.prc_info['run']
        )
        self.pre_requisite_course_id = unicode(pre_requisite_course_key)

        self.dashboard_page = DashboardPage(self.browser)
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_dashboard_message(self):
        """
        Scenario: Any course where there is a Pre-Requisite course Student dashboard should have
        appropriate messaging.
            Given that I am on the Student dashboard
            When I view a course with a pre-requisite course set
            Then At the bottom of course I should see course requirements message.'
        """
        # Before configuration, the dashboard shows no pre-requisite message.
        self.dashboard_page.visit()
        self.assertFalse(self.dashboard_page.pre_requisite_message_displayed())

        # Logout and login as a staff.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()

        # visit course settings page and set pre-requisite course
        self.settings_page.visit()
        self._set_pre_requisite_course()

        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()

        # Now the dashboard should display the pre-requisite course message.
        self.dashboard_page.visit()
        EmptyPromise(lambda: self.dashboard_page.available_courses > 0, 'Dashboard page loaded').fulfill()
        self.assertTrue(self.dashboard_page.pre_requisite_message_displayed())

    def _set_pre_requisite_course(self):
        """
        set pre-requisite course
        """
        select_option_by_value(self.settings_page.pre_requisite_course_options, self.pre_requisite_course_id)
        self.settings_page.save_changes()
class ProblemExecutionTest(UniqueCourseTest):
    """
    Tests of problems.
    """

    def setUp(self):
        """
        Install a course fixture containing a Python-graded problem.
        """
        super(ProblemExecutionTest, self).setUp()

        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        self.tab_nav = TabNavPage(self.browser)

        # Install a course with sections and problems.
        fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        # The problem's <script> imports helpers from this uploaded zip.
        fixture.add_asset(['python_lib.zip'])

        fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Python Problem', data=dedent(
                        """\
                        <problem>
                        <script type="loncapa/python">
                        from number_helpers import seventeen, fortytwo
                        oneseven = seventeen()

                        def check_function(expect, ans):
                            if int(ans) == fortytwo(-22):
                                return True
                            else:
                                return False
                        </script>

                        <p>What is the sum of $oneseven and 3?</p>

                        <customresponse expect="20" cfn="check_function">
                            <textline/>
                        </customresponse>
                        </problem>
                        """
                    ))
                )
            )
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_python_execution_in_problem(self):
        # Navigate to the problem page
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')
        self.course_nav.go_to_section('Test Section', 'Test Subsection')

        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name, 'PYTHON PROBLEM')

        # Does the page have computation results?
        self.assertIn("What is the sum of 17 and 3?", problem_page.problem_text)

        # Fill in the answer correctly.
        problem_page.fill_answer("20")
        problem_page.click_check()
        self.assertTrue(problem_page.is_correct())

        # Fill in the answer incorrectly.
        problem_page.fill_answer("4")
        problem_page.click_check()
        self.assertFalse(problem_page.is_correct())
class EntranceExamTest(UniqueCourseTest):
    """
    Tests that course has an entrance exam.
    """

    def setUp(self):
        """
        Install a plain course fixture and set up the pages under test.
        """
        super(EntranceExamTest, self).setUp()

        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_entrance_exam_section(self):
        """
        Scenario: Any course that is enabled for an entrance exam, should have entrance exam chapter at courseware
        page.
            Given that I am on the courseware page
            When I view the courseware that has an entrance exam
            Then there should be an "Entrance Exam" chapter.'
        """
        entrance_exam_link_selector = 'div#accordion nav div h3 a'

        # Before enabling, the courseware has no "Entrance Exam" chapter.
        self.courseware_page.visit()
        self.courseware_page.wait_for_page()
        self.assertFalse(element_has_text(
            page=self.courseware_page,
            css_selector=entrance_exam_link_selector,
            text='Entrance Exam'
        ))

        # Logout and login as a staff.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()

        # visit course settings page and set/enabled entrance exam for that course.
        self.settings_page.visit()
        self.settings_page.wait_for_page()
        self.assertTrue(self.settings_page.is_browser_on_page())
        self.settings_page.entrance_exam_field.click()
        self.settings_page.save_changes()

        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()

        # Now the courseware should contain an "Entrance Exam" chapter.
        self.courseware_page.visit()
        self.courseware_page.wait_for_page()
        self.assertTrue(element_has_text(
            page=self.courseware_page,
            css_selector=entrance_exam_link_selector,
            text='Entrance Exam'
        ))
| DefyVentures/edx-platform | common/test/acceptance/tests/lms/test_lms.py | Python | agpl-3.0 | 40,301 | [
"VisIt"
] | 6ac46077a3d9d6314b73535011e49918fc1a9c0e4104990a06383c205e5fadcf |
"""This module provides the FourierNoiseDomainExpression class to represent
f-domain (Fourier domain) noise expressions.
Copyright 2014--2022 Michael Hayes, UCECE
"""
from __future__ import division
from .sym import symsimplify
from .functions import sqrt
from .sym import pi, omegasym, fsym
from .state import state
from .domains import FourierNoiseDomain
from .expr import expr
from .noiseexpr import NoiseExpression
from .omegaexpr import omega, AngularFourierDomainExpression
from .fexpr import FourierDomainExpression
from .voltagemixin import VoltageMixin
from .currentmixin import CurrentMixin
import sympy as sym
class FourierNoiseDomainExpression(FourierNoiseDomain, NoiseExpression):
    """Frequency domain (one-sided) noise spectrum expression (amplitude
    spectral density).

    This characterises a wide-sense stationary, zero-mean Gaussian
    noise random process.

    Arithmetic between two FourierNoiseDomainExpression instances
    treats them as uncorrelated unless they carry the same nid (noise
    identifier); a fresh nid is created when none is given.

    Uncorrelated noise adds in quadrature (on a power basis), so
    (FourierNoiseDomainExpression(3) +
    FourierNoiseDomainExpression(4)).expr = 5 since 5 = sqrt(3**2 + 4**2).

    FourierNoiseDomainExpression(3) != FourierNoiseDomainExpression(3)
    because each is a distinct noise realisation (even though their
    properties match), whereas their .expr attributes compare equal.
    Two expressions constructed with the same explicit nid (e.g.
    nid='n1') denote the same realisation and compare equal.

    Caution: summing two noise expressions yields a result with a new
    nid, which can produce surprising answers since different nids are
    assumed uncorrelated.  For example, with
    a = FourierNoiseDomainExpression(3) and
    b = FourierNoiseDomainExpression(4),
    a + b - b gives sqrt(41) but a + b - a gives sqrt(34).

    The SuperpositionVoltage and SuperpositionCurrent classes handle
    this correctly by tracking each noise component separately:
    (SuperpositionVoltage(a) + SuperpositionVoltage(b) -
    SuperpositionVoltage(b)).n gives 3 as expected.
    """
    var = fsym

    def plot(self, fvector=None, **kwargs):
        """Plot frequency response at values specified by fvector.

        There are many plotting options, see matplotlib.pyplot.plot.

        For example:
            V.plot(fvector, log_frequency=True)
            V.real.plot(fvector, color='black')
            V.phase.plot(fvector, color='black', linestyle='--')

        By default complex data is plotted as separate plots of magnitude (dB)
        and phase.
        """
        # Imported lazily to avoid pulling in matplotlib at module load.
        from .plot import plot_frequency
        return plot_frequency(self, fvector, **kwargs)

    def transform(self, arg, **assumptions):
        """Transform into a different domain."""

        arg = expr(arg)

        if isinstance(arg, AngularFourierDomainExpression):
            # Map f -> omega / (2 * pi) to move to the angular domain.
            cls = self._class_by_quantity(self.quantity,
                                          'angular fourier noise')
            return cls(self.subs(omega / (2 * pi)), nid=self.nid,
                       **assumptions)

        if isinstance(arg, FourierDomainExpression):
            cls = self._class_by_quantity(self.quantity, 'fourier noise')
            return cls(self.subs(arg, **assumptions), nid=self.nid,
                       **assumptions)

        return super(FourierNoiseDomainExpression,
                     self).transform(arg, **assumptions)
class FourierNoiseDomainVoltage(VoltageMixin, FourierNoiseDomainExpression):
    """Voltage noise amplitude spectral density (units V/rtHz).

    May be a function of linear frequency, f.  For example, an opamp
    voltage noise can be modelled with:
    v = FourierNoiseDomainVoltage(1e-8 / sqrt(f) + 8e-9)
    """
    quantity_label = 'Voltage noise spectral density'
    units = 'V/rtHz'
class FourierNoiseDomainCurrent(CurrentMixin, FourierNoiseDomainExpression):
    """Current noise amplitude spectral density (units A/rtHz).

    May be a function of linear frequency, f.  For example, an opamp
    current noise can be modelled with:
    i = FourierNoiseDomainCurrent(3e-12 / sqrt(f) + 200e-15)
    """
    quantity_label = 'Current noise spectral density'
    units = 'A/rtHz'
from .expressionclasses import expressionclasses
# Register the voltage/current variants for the fourier noise domain.
expressionclasses.register(
    'fourier noise', FourierNoiseDomainExpression, None,
    ('voltage', 'current'))
from .fexpr import f
from .noiseomegaexpr import AngularFourierNoiseDomainExpression
| mph-/lcapy | lcapy/noisefexpr.py | Python | lgpl-2.1 | 4,770 | [
"Gaussian"
] | 635e9fdeea487c10522c8373ed022bfe0697b494dcae5e1c8b7b71759a69f78b |
from __future__ import print_function
import itertools
from ctypes import WINFUNCTYPE, POINTER, Structure, c_int, c_void_p, windll, pointer
from ctypes.wintypes import BOOL, HWND, RECT, LPARAM, HDC, HANDLE, DWORD, WCHAR
# Constants
PHYSICAL_MONITOR_DESCRIPTION_SIZE = 128
TRUE = 1

# Enums
# Color-temperature codes used by Get/SetMonitorColorTemperature.
MC_COLOR_TEMPERATURE_UNKNOWN = 0
MC_COLOR_TEMPERATURE_4000K = 1
MC_COLOR_TEMPERATURE_5000K = 2
MC_COLOR_TEMPERATURE_6500K = 3
MC_COLOR_TEMPERATURE_7500K = 4
MC_COLOR_TEMPERATURE_8200K = 5
MC_COLOR_TEMPERATURE_9300K = 6
MC_COLOR_TEMPERATURE_10000K = 7
MC_COLOR_TEMPERATURE_11500K = 8

# Index i of this list is the human-readable name for temperature code i.
MC_COLOR_TEMPERATURE = ['UNKNOWN', '4000K', '5000K', '6500K', '7500K',
                        '8200K', '9300K', '10000K', '11500K']

# Display-technology codes returned by GetMonitorTechnologyType.
MC_SHADOW_MASK_CATHODE_RAY_TUBE = 0
MC_APERTURE_GRILL_CATHODE_RAY_TUBE = 1
MC_THIN_FILM_TRANSISTOR = 2
MC_LIQUID_CRYSTAL_ON_SILICON = 3
MC_PLASMA = 4
MC_ORGANIC_LIGHT_EMITTING_DIODE = 5
MC_ELECTROLUMINESCENT = 6
MC_MICROELECTROMECHANICAL = 7
MC_FIELD_EMISSION_DEVICE = 8

# Index i of this list is the human-readable name for technology code i.
MC_DISPLAY_TECHNOLOGY_TYPE = [
    'SHADOW MASK CATHODE RAY TUBE',
    'APERTURE GRILL CATHODE RAY TUBE',
    'THIN FILM TRANSISTOR',
    'LIQUID CRYSTAL ON SILICON',
    'PLASMA',
    'ORGANIC LIGHT EMITTING DIODE',
    'ELECTROLUMINESCENT',
    'MICROELECTROMECHANICAL',
    'FIELD EMISSION DEVICE'
]

# Axis selectors for display-area position/size queries.
MC_HORIZONTAL_POSITION = 0
MC_VERTICAL_POSITION = 1
MC_WIDTH = 0
MC_HEIGHT = 1

# Channel selectors for drive/gain queries.
MC_RED_DRIVE = 0
MC_GREEN_DRIVE = 1
MC_BLUE_DRIVE = 2
MC_RED_GAIN = 0
MC_GREEN_GAIN = 1
MC_BLUE_GAIN = 2


# Types
class PHYSICAL_MONITOR(Structure):
    # Mirrors the Win32 PHYSICAL_MONITOR struct (handle + description).
    _fields_ = (("hPhysicalMonitor", HANDLE),
                ("szPhysicalMonitorDescription",
                 WCHAR * PHYSICAL_MONITOR_DESCRIPTION_SIZE))

HMONITOR = HANDLE

# Function prototypes
# Callback signature for EnumDisplayMonitors.
MONITOR_ENUM_PROC = WINFUNCTYPE(BOOL, HMONITOR, HDC, POINTER(RECT), LPARAM)
class classproperty(object):
    """ Descriptor implementing a read-only property evaluated against
    the class, accessible through both the class and its instances.

    With regards to http://stackoverflow.com/a/13624858/287185
    """

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, instance, owner):
        # Always pass the owning class, never the instance.
        return self.fget(owner)
class PhysicalMonitor(object):
    """ Represents a physical monitor, wrapping the Win32 High-Level
    Monitor Configuration API (Dxva2.dll, DDC/CI).
    """
    def __init__(self, handle, description):
        self.__handle, self.description = handle, description

    @classproperty
    def all(cls):
        """ Gets a list of all physical monitors in the system.
        """
        return list(itertools.chain.from_iterable(m.physical for m in get_display_monitors()))

    @property
    def min_brightness(self):
        """ Gets the monitor's minimum brightness, as an integer
        """
        return self.get_brightness()[0]

    @property
    def max_brightness(self):
        """ Gets the monitor's maximum brightness, as an integer
        """
        return self.get_brightness()[2]

    @property
    def brightness(self):
        """ Gets or sets the current monitor brightness, as an integer
        """
        return self.get_brightness()[1]

    @brightness.setter
    def brightness(self, value):
        windll.Dxva2.SetMonitorBrightness(self.__handle, DWORD(value))

    @property
    def min_contrast(self):
        """ Gets the monitor's minimum contrast, as an integer
        """
        return self.get_contrast()[0]

    @property
    def max_contrast(self):
        """ Gets the monitor's maximum contrast, as an integer
        """
        return self.get_contrast()[2]

    @property
    def contrast(self):
        """ Gets or sets the current monitor contrast, as an integer
        """
        return self.get_contrast()[1]

    @contrast.setter
    def contrast(self, value):
        windll.Dxva2.SetMonitorContrast(self.__handle, DWORD(value))

    # Bug fix: this property was defined twice with identical bodies; the
    # second definition (and its setter) silently shadowed the first.
    # Keep a single getter/setter pair.
    @property
    def color_temperature(self):
        """ Gets or sets the monitor's current color temperature, as a string.
        Possible values: 'UNKNOWN', '4000K', '5000K', '6500K', '7500K', '8200K', '9300K', '10000K', '11500K'
        """
        color_temp = c_int(1)
        windll.Dxva2.GetMonitorColorTemperature(self.__handle, pointer(color_temp))
        return MC_COLOR_TEMPERATURE[color_temp.value]

    @color_temperature.setter
    def color_temperature(self, new_temp):
        # Accept either a string name or a raw MC_COLOR_TEMPERATURE_* code.
        if isinstance(new_temp, str):
            new_temp = MC_COLOR_TEMPERATURE.index(new_temp)
        windll.Dxva2.SetMonitorColorTemperature(self.__handle, new_temp)

    @property
    def technology_type(self):
        """ Return the monitor technology type, as one of the following strings:
        - 'SHADOW MASK CATHODE RAY TUBE'
        - 'APERTURE GRILL CATHODE RAY TUBE'
        - 'THIN FILM TRANSISTOR'
        - 'LIQUID CRYSTAL ON SILICON'
        - 'PLASMA'
        - 'ORGANIC LIGHT EMITTING DIODE'
        - 'ELECTROLUMINESCENT'
        - 'MICROELECTROMECHANICAL'
        - 'FIELD EMISSION DEVICE'
        """
        tech_type = DWORD(0)
        windll.Dxva2.GetMonitorTechnologyType(self.__handle, pointer(tech_type))
        return MC_DISPLAY_TECHNOLOGY_TYPE[tech_type.value]

    def degauss(self):
        """ Degauss the monitor (CRT displays only). """
        result = windll.Dxva2.DegaussMonitor(self.__handle)
        if result == 0:
            last_error = windll.Kernel32.GetLastError()
            print("Error encountered: %x" % last_error)

    def get_brightness(self):
        """ Return a tuple of three integers, representing the minimum, current, and maximum monitor brightness.
        """
        min, crt, max = DWORD(0), DWORD(0), DWORD(0)
        windll.Dxva2.GetMonitorBrightness(self.__handle, pointer(min), pointer(crt), pointer(max))
        return (min.value, crt.value, max.value)

    def get_capabilities(self):
        """ Return a (capabilities, supported color temperatures) tuple of bitmasks. """
        caps, supported_temps = DWORD(0), DWORD(0)
        windll.Dxva2.GetMonitorCapabilities(self.__handle, pointer(caps), pointer(supported_temps))
        return (caps.value, supported_temps.value)

    def get_contrast(self):
        """ Return a tuple of three integers, representing the minimum, current, and maximum monitor contrast.
        """
        min, crt, max = DWORD(0), DWORD(0), DWORD(0)
        windll.Dxva2.GetMonitorContrast(self.__handle, pointer(min), pointer(crt), pointer(max))
        return (min.value, crt.value, max.value)

    def get_display_area_position(self, position_type):
        """ Return a tuple of three integers, representing the minimum, current, and maximum monitor display area position.
        The following values for *position_type* are supported:
        - *MC_VERTICAL_POSITION* - return the vertical position.
        - *MC_HORIZONTAL_POSITION* - return the horizontal position.
        """
        min, crt, max = DWORD(0), DWORD(0), DWORD(0)
        windll.Dxva2.GetMonitorDisplayAreaPosition(self.__handle, position_type, pointer(min), pointer(crt), pointer(max))
        # Bug fix: previously returned raw DWORD objects, contradicting the
        # docstring and every other getter; unwrap to plain integers.
        return (min.value, crt.value, max.value)

    def get_display_area_size(self, size_type):
        """ Return a tuple of three integers, representing the minimum, current, and maximum monitor width or height.
        The following values for *size_type* are supported:
        - *MC_WIDTH* - return the monitor width.
        - *MC_HEIGHT* - return the monitor height.
        """
        min, crt, max = DWORD(0), DWORD(0), DWORD(0)
        windll.Dxva2.GetMonitorDisplayAreaSize(self.__handle, size_type, pointer(min), pointer(crt), pointer(max))
        # Bug fix: unwrap DWORDs to integers (see get_display_area_position).
        return (min.value, crt.value, max.value)

    def get_red_green_or_blue_drive(self, drive_type):
        """ Return (min, current, max) drive for the MC_*_DRIVE channel given. """
        min, crt, max = DWORD(0), DWORD(0), DWORD(0)
        windll.Dxva2.GetMonitorRedGreenOrBlueDrive(self.__handle, drive_type, pointer(min), pointer(crt), pointer(max))
        return (min.value, crt.value, max.value)

    def get_red_green_or_blue_gain(self, gain_type):
        """ Return (min, current, max) gain for the MC_*_GAIN channel given. """
        min, crt, max = DWORD(0), DWORD(0), DWORD(0)
        windll.Dxva2.GetMonitorRedGreenOrBlueGain(self.__handle, gain_type, pointer(min), pointer(crt), pointer(max))
        return (min.value, crt.value, max.value)

    def restore_factory_color_defaults(self):
        """ Restore the monitor's factory color settings. """
        # Bug fix: these Dxva2 functions require the physical monitor handle;
        # calling them with no arguments always failed.
        windll.Dxva2.RestoreMonitorFactoryColorDefaults(self.__handle)

    def restore_factory_defaults(self):
        """ Restore all of the monitor's factory settings. """
        windll.Dxva2.RestoreMonitorFactoryDefaults(self.__handle)

    def save_current_settings(self):
        """ Persist the current settings to the monitor's non-volatile storage. """
        windll.Dxva2.SaveCurrentMonitorSettings(self.__handle)
class DisplayMonitor(object):
    """ A logical display monitor (HMONITOR) with its bounding rectangle,
    exposing the physical monitors that back it.
    """
    def __init__(self, hmonitor, rect):
        self.__handle = hmonitor
        self.top, self.bottom, self.left, self.right = rect.top, rect.bottom, rect.left, rect.right

    @property
    def physical(self):
        """ Return the list of PhysicalMonitor objects backing this display. """
        cnt_mon = DWORD(0)
        windll.Dxva2.GetNumberOfPhysicalMonitorsFromHMONITOR(self.__handle, pointer(cnt_mon))
        phys_monitors = (PHYSICAL_MONITOR * cnt_mon.value)()
        # Bug fix: the array size argument was hard-coded to 1, so only the
        # first physical monitor was ever filled in; pass the queried count.
        windll.Dxva2.GetPhysicalMonitorsFromHMONITOR(self.__handle, cnt_mon.value, phys_monitors)
        return [PhysicalMonitor(x.hPhysicalMonitor, x.szPhysicalMonitorDescription) for x in phys_monitors]
def get_display_monitors():
    """ Enumerate all logical display monitors in the system and return
    them as a list of DisplayMonitor objects.
    """
    found = []

    def _on_monitor(hmonitor, hdc_monitor, rc_monitor, data):
        # Collect each monitor handle plus its bounding rectangle.
        found.append(DisplayMonitor(hmonitor, rc_monitor.contents))
        return TRUE

    windll.user32.EnumDisplayMonitors(None, None, MONITOR_ENUM_PROC(_on_monitor), None)
    return found
| cmihai/winmoncon | winmoncon/monitor.py | Python | unlicense | 9,426 | [
"CRYSTAL"
] | ff9e60b896fe8f3b5ac7a332bcd43e80d61ae2e8e4b1af7114a803e026ba4b69 |
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom chat module for Instantbird."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.chat as chat
########################################################################
# #
# The Instantbird chat class. #
# #
########################################################################
class Chat(chat.Chat):
    """Instantbird-specific specialization of Orca's generic chat support.

    Instantbird renders IM conversations inside a document frame, with
    each message "bubble" embedded as a child object, so this subclass
    overrides how messages are extracted and chats are identified.
    """
    def __init__(self, script, buddyListAncestries):
        """Initialize chat support for the given script.

        Arguments:
        - script: the script creating this chat instance
        - buddyListAncestries: role ancestries used to identify the
          buddy list (passed through to orca.chat.Chat)
        """
        # IMs get inserted as embedded object characters in these roles.
        #
        self._messageParentRoles = [pyatspi.ROLE_DOCUMENT_FRAME,
                                    pyatspi.ROLE_SECTION,
                                    pyatspi.ROLE_PARAGRAPH]
        chat.Chat.__init__(self, script, buddyListAncestries)
    ########################################################################
    #                                                                      #
    # InputEvent handlers and supporting utilities                         #
    #                                                                      #
    ########################################################################
    def getMessageFromEvent(self, event):
        """Get the actual displayed message. This will almost always be the
        unaltered any_data from an event of type object:text-changed:insert.

        Arguments:
        - event: the Event from which to take the text.

        Returns the string which should be presented as the newly-inserted
        text. (Things like chatroom name prefacing get handled elsewhere.)
        """
        string = ""
        # IMs are written in areas that look like bubbles. When a new bubble
        # is inserted, we see an embedded object character inserted into the
        # document frame. The first paragraph is the bubble title; the
        # rest (usually just one) are the message itself.
        #
        if event.source.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
            bubble = event.source[event.detail1]
            hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_PARAGRAPH
            paragraphs = pyatspi.findAllDescendants(bubble, hasRole)
            # If the user opted the non-default, "simple" appearance, then this
            # might not be a bubble at all, but a paragraph.
            #
            if not paragraphs and bubble.getRole() == pyatspi.ROLE_PARAGRAPH:
                paragraphs.append(bubble)
            for paragraph in paragraphs:
                msg = self._script.utilities.substring(paragraph, 0, -1)
                if msg == self._script.EMBEDDED_OBJECT_CHARACTER:
                    # This seems to occur for non-focused conversations.
                    #
                    msg = self._script.utilities.substring(paragraph[0], 0, -1)
                string = self._script.utilities.appendString(string, msg)
            return string
        # If we instead have a section, we are writing another message into
        # the existing bubble. In this case, we get three separate items
        # inserted: a separator, a paragraph with the desired text, and an
        # empty section.
        #
        if event.source.getRole() == pyatspi.ROLE_SECTION:
            obj = event.source[event.detail1]
            if obj and obj.getRole() == pyatspi.ROLE_PARAGRAPH:
                try:
                    text = obj.queryText()
                except:
                    # Object may not implement the text interface; fall
                    # through and return the empty string.
                    pass
                else:
                    string = text.getText(0, -1)
        return string
    ########################################################################
    #                                                                      #
    # Convenience methods for identifying, locating different accessibles  #
    #                                                                      #
    ########################################################################
    def isChatRoomMsg(self, obj):
        """Returns True if the given accessible is the text object
        associated with a chat room conversation.

        Arguments:
        - obj: the accessible object to examine.
        """
        # We might need to refine this later. For now, just get things
        # working.
        #
        if obj and obj.getRole() in self._messageParentRoles:
            return True
        return False
    def getChatRoomName(self, obj):
        """Attempts to find the name of the current chat room.

        Arguments:
        - obj: The accessible of interest

        Returns a string containing what we think is the chat room name.
        """
        name = ""
        ancestor = self._script.utilities.ancestorWithRole(
            obj,
            [pyatspi.ROLE_SCROLL_PANE, pyatspi.ROLE_FRAME],
            [pyatspi.ROLE_APPLICATION])
        if ancestor and ancestor.getRole() == pyatspi.ROLE_SCROLL_PANE:
            # The scroll pane has a proper labelled by relationship set.
            #
            name = self._script.utilities.displayedLabel(ancestor)
        if not name:
            try:
                text = self._script.utilities.displayedText(ancestor)
                # Ignore text that is merely the application's own name.
                if text.lower().strip() != self._script.name.lower().strip():
                    name = text
            except:
                # ancestor may be None or lack displayed text.
                pass
        return name
    def isFocusedChat(self, obj):
        """Returns True if we plan to treat this chat as focused for
        the purpose of deciding whether or not a message should be
        presented to the user.

        Arguments:
        - obj: the accessible object to examine.
        """
        # Normally, we'd see if the top level window associated
        # with this object had STATE_ACTIVE. That doesn't work
        # here. So see if the script for the locusOfFocus is
        # this script. If so, the only other possibility is that
        # we're in the buddy list instead.
        #
        if obj and obj.getState().contains(pyatspi.STATE_SHOWING) \
           and self._script.utilities.isInActiveApp(obj) \
           and not self.isInBuddyList(obj):
            return True
        return False
| pvagner/orca | src/orca/scripts/apps/Instantbird/chat.py | Python | lgpl-2.1 | 7,140 | [
"ORCA"
] | 95c1049dd740988cad0875be165990ed4b57a5c1e0f622c40575b86b37027b0a |
import numpy as np
import array
import os, sys
import re
import time
import multiprocessing
import h5py
import logging
from astropy.table import Table, Column
from astropy import units as u
import argparse
# Command-line interface: -p/--params names a Python parameter file that is
# imported as a module; -q/--quiet suppresses informational printing.
parser = argparse.ArgumentParser()
parser.add_argument("-p","--params", type=str,
                    help = "Parameter file")
parser.add_argument("-q", "--quiet", help = "Suppress extra outputs",
                    action = "store_true")
args = parser.parse_args()
quiet = args.quiet
# Strip the ".py" extension to get an importable module name.
# NOTE(review): re.split(".py", ...) treats '.' as a regex wildcard, so any
# character followed by "py" splits the string -- verify parameter filenames
# never contain "py" elsewhere, or escape the pattern.
params_root = re.split(".py", args.params)[0]
# Remove any stale bytecode so the freshest parameter file is loaded.
if os.path.isfile(params_root+".pyc"):
    os.remove(params_root+".pyc")
import importlib
try:
    params = importlib.import_module(params_root)
    print('Successfully loaded "{0}" as params'.format(args.params))
    importlib.reload(params)
except:
    print('Failed to load "{0}" as params'.format(args.params))
    raise
# quietprint: no-op when --quiet, otherwise prints its arguments separated
# (and trailed) by single spaces, followed by a newline.
if quiet:
    quietprint = lambda *a: None
else:
    def quietprint(*args):
        for arg in args:
            print(arg, end=' ')
        print()
# Fitting function definition for later use by Processess
def galaxyFit(inputQueue, printQueue, printlock):
    """Worker process: fit the model SED grid to one galaxy at a time.

    Consumes galaxy indices from inputQueue until a 'STOP' sentinel is
    received. For each galaxy the model fluxes are normalised to the
    observations with the analytic least-squares scale, the best-fitting
    template is located by minimum chi-squared, and a whitespace-separated
    result line is pushed onto printQueue.

    Reads the module-level globals set up in __main__ (obs, obs_err, f, z,
    zobs, ID, SFR, tg, tv, tau, metallicities, fesc, flux_corr,
    include_rest, n_metal, n_tg, n_tau, n_tauv, n_fesc).

    Arguments:
    - inputQueue: multiprocessing.Queue yielding galaxy indices
    - printQueue: multiprocessing.Queue receiving formatted output strings
    - printlock: multiprocessing.Lock serialising console printing
    """
    for gal in iter(inputQueue.get, 'STOP'):
        j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift
        flux_obs = obs[gal,:]
        flux_err = obs_err[gal,:]
        #flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero
        I = np.where(flux_err > 0.)[0] # Find bands with no observation
        if len(I) == 0:
            # No usable photometry: emit a row of sentinel values.
            if include_rest:
                # Bug fix: was len(fo) -- 'fo' is undefined in this scope and
                # raised NameError; len(flux_obs) matches the usage below.
                M_scaled = np.ones(len(flux_obs)) * -99.
                restframe_output = ' '.join(M_scaled.astype('str'))
                output_string = '{0} {1} {2} {3} {4} {5} {6} {7}' \
                    ' {8} {9} {10} {11} {12} {13} {14} {15} {16}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99, -99, -99,-99,len(I),-99,z[j],restframe_output,'\n')
            else:
                output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99,-99, -99,-99,len(I),-99,'\n')
            printQueue.put(output_string)
            continue
        flux_obs = flux_obs[I] # and exclude from fit
        flux_err = flux_err[I]
        flux_models = f[j,I,:]
        # Add a 10% systematic flux error in quadrature for the scaling.
        tot_err = np.sqrt(flux_err**2 + (0.1*flux_obs)**2)
        # Analytic least-squares normalisation of every model to the data.
        top = 0.
        bottom = 0.
        for i in range(len(flux_obs)):
            top += (flux_models[i,:]*flux_obs[i])/(tot_err[i]**2)
            bottom += (flux_models[i,:]**2)/(tot_err[i]**2)
        scale = top/bottom
        scale = np.reshape(scale, (n_metal, n_tg, n_tau, n_tauv, n_fesc))
        # NOTE(review): chi-squared below divides by flux_err while the scale
        # above used tot_err; galaxyFit2 uses tot_err in both places --
        # confirm which is intended.
        chisq = 0.
        for i in range(len(flux_obs)):
            chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(flux_err[i])**2)
        chimin, minind = np.nanmin(chisq), np.nanargmin(chisq)
        if np.isinf(chimin) or np.isnan(minind):
            # Fit failed (all-NaN chi-squared): emit sentinel row.
            if include_rest:
                M_scaled = np.ones(len(flux_obs)) * -99.
                restframe_output = ' '.join(M_scaled.astype('str'))
                output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15} {16}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99, -99, -99,-99,len(I),-99,z[j],restframe_output,'\n')
            else:
                output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99,-99, -99,-99,len(I),-99,'\n')
            printQueue.put(output_string)
            continue
        #Find the coordinate of the model with the bestfit mass
        mi, tgi, ti, tvi, fi = np.unravel_index(minind,
                                                (n_metal, n_tg,
                                                 n_tau, n_tauv, n_fesc))
        Bestfit_Mass = np.log10(scale[mi, tgi, ti, tvi, fi]*flux_corr)
        Bestfit_SFR = (scale[mi, tgi, ti, tvi, fi] *
                       SFR[mi, tgi, ti, tvi, fi]*flux_corr)
        #Bestfit_Beta = beta[tgi,tvi,ti,mi]
        Bestfit_Beta = -99.
        #Scale the observed tot_mag band of the template to be the same as the observed tot_mag band of the galaxy
        #Convert the templates so they are no longer units of per stellar mass
        F_rest = f[0,:]*scale[mi, tgi, ti, tvi, fi]*flux_corr
        restframeMags = 23.9 - 2.5*np.log10(F_rest)
        #UV_rest = UV_flux[0]*scale[tgi,tvi,ti,mi]*flux_corr
        #restframeMUV = 23.9 - 2.5*np.log10(UV_rest)
        M_scaled = restframeMags[:, mi, tgi, ti, tvi, fi]
        #MUV_scaled = restframeMUV[tgi,tvi,ti,mi]
        MUV_scaled = -99.
        if np.isnan(Bestfit_Mass) or np.isinf(chimin):
            Bestfit_Mass = -99
            #M_scaled[:] = -99
            tgs = -99
            tvs = -99
            taus = -99
            mis = -99
            escape_fraction = -99
        else:
            # Translate grid indices into physical parameter values.
            tgs = tg[tgi]/1e9
            tvs = tv[tvi]
            taus = tau[ti]
            mis = metallicities[mi]
            escape_fraction = fesc[fi]
        printlock.acquire()
        print('{0:6d} {1:8d} {2:>5.2f} {3:>7.2f} {4:>8.1f} {5:>8.3f} {6:>5.1f} {7:>8.2f} {8:>4.2f} {9:>5.2f}'.format(gal+1,ID[gal], zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis,np.log10(Bestfit_SFR)))
        if include_rest:
            restframe_output = ' '.join(M_scaled.astype('str'))
            output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15} {16}'.format(gal+1,ID[gal],zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis, MUV_scaled, minind,Bestfit_SFR,len(I),Bestfit_Beta,z[j],restframe_output,'\n')
        else:
            output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14}'.format(gal+1,ID[gal],zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis, MUV_scaled, minind,Bestfit_SFR,len(I),Bestfit_Beta,'\n')
        printlock.release()
        printQueue.put(output_string)
def galaxyFit2(inputQueue, printQueue, printlock):
    """Worker process: like galaxyFit, but additionally builds posterior
    histograms (PDFs) of log-mass and log-SFR weighted by the chi-squared
    likelihood, for the 'hist' fitting mode.

    Consumes galaxy indices from inputQueue until 'STOP'. Pushes
    [output_string, mass_histogram, sfr_histogram] onto printQueue, where
    each histogram is the (counts, bin_edges) pair from np.histogram.
    Reads the same module-level globals as galaxyFit.

    NOTE(review): two latent defects in the early-exit paths --
    1) the len(I)==0 branch puts the raw format template (output_string)
       rather than the formatted 'output', and puts a bare string where the
       consumer in __main__ unpacks a 3-item list;
    2) when chimin is inf/NaN, the else-branch is skipped, so Bestfit_Mass,
       mass_hist, sfr_hist etc. are undefined when used below, raising
       NameError. Both need restructuring to fix safely.
    """
    for gal in iter(inputQueue.get, 'STOP'):
        output_string = '{0[0]} {0[1]} {0[2]} {0[3]} {0[4]} {0[5]} ' + \
                        '{0[6]} {0[7]} {0[8]} {0[9]} {0[10]} {0[11]} ' + \
                        '{0[12]} {0[13]} {0[14]}'
        j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift
        # Histogram domains for the mass and SFR PDFs (log10 units).
        log_mass_min, log_mass_max = 7, 13
        log_sfr_min, log_sfr_max = -3, 4
        flux_obs = obs[gal,:]
        flux_err = obs_err[gal,:]
        #flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero
        I = np.where(flux_err > 0.)[0] # Find bands with no observation
        if len(I) == 0:
            output_array = [gal+1, ID[gal], zobs[gal], z[j],
                            -99, -99, -99, -99, -99, -99, -99,
                            -99,-99,len(I),-99,'\n']
            output = output_string.format(output_array)
            if include_rest:
                M_scaled = np.ones(len(flux_obs)) * -99.
                restframe_output = ' '.join(M_scaled.astype('str'))
                output = output + restframe_output + ' \n'
            else:
                output = output + ' \n'
            # NOTE(review): puts the unformatted template, not 'output', and
            # the 'hist' consumer expects a 3-item list -- see docstring.
            printQueue.put(output_string)
            continue
        flux_obs = flux_obs[I] # and exclude from fit
        flux_err = flux_err[I]
        flux_models = f[j,I,:]
        # Add the fractional systematic error from the parameter file.
        tot_err = np.sqrt(flux_err**2 + (params.flux_err*flux_obs)**2)
        # Analytic least-squares normalisation of every model to the data.
        top = 0.
        bottom = 0.
        for i in range(len(flux_obs)):
            top += (flux_models[i,:]*flux_obs[i])/(tot_err[i]**2)
            bottom += (flux_models[i,:]**2)/(tot_err[i]**2)
        scale = top/bottom
        scale = np.reshape(scale, (n_metal, n_tg, n_tau, n_tauv, n_fesc))
        chisq = 0.
        for i in range(len(flux_obs)):
            chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(tot_err[i])**2)
        chimin, minind = np.nanmin(chisq), np.nanargmin(chisq)
        # Convert chi-squared to a normalised likelihood over the grid.
        likelihood = np.reshape(np.exp(-0.5*chisq),
                                (n_metal, n_tg, n_tau, n_tauv, n_fesc))
        likelihood[np.isnan(likelihood)] = 0.
        likelihood = np.abs(likelihood/likelihood.sum())
        if np.isinf(chimin) or np.isnan(minind):
            output_array = [gal+1, ID[gal], zobs[gal], z[j],
                            -99, -99, -99, -99, -99, -99, -99,
                            -99,-99,len(I),-99,'\n']
            output = output_string.format(output_array)
            # NOTE(review): execution falls through to the code below with
            # Bestfit_Mass, mass_hist etc. undefined -- see docstring.
        else:
            #Find the coordinate of the model with the bestfit mass
            mi, tgi, ti, tvi, fi = np.unravel_index(minind,
                                                    (n_metal, n_tg,
                                                     n_tau, n_tauv, n_fesc))
            Masses = np.abs(np.log10(scale*flux_corr))
            SFRs = np.abs(np.log10(scale * SFR * flux_corr))
            # Likelihood-weighted PDFs of log-mass and log-SFR.
            mass_hist = np.histogram(Masses.flatten(),
                                     range = (log_mass_min, log_mass_max),
                                     bins = 120,
                                     weights = likelihood.flatten(),
                                     density = True)
            sfr_hist = np.histogram(SFRs.flatten(),
                                    range = (log_sfr_min, log_sfr_max),
                                    bins = 140,
                                    weights = likelihood.flatten(),
                                    density = True)
            Bestfit_Mass = np.abs(np.log10(scale[mi, tgi, ti, tvi, fi]*flux_corr))
            Bestfit_SFR = np.abs(np.log10(scale[mi, tgi, ti, tvi, fi] *
                                          SFR[mi, tgi, ti, tvi, fi]*flux_corr))
            if np.isnan(Bestfit_Mass) or np.isinf(chimin):
                Bestfit_Mass = -99
                #M_scaled[:] = -99
                tgs = -99
                tvs = -99
                taus = -99
                mis = -99
                escape_fraction = -99
            else:
                # Translate grid indices into physical parameter values.
                tgs = tg[tgi]/1e9
                tvs = tv[tvi]
                taus = tau[ti]
                mis = metallicities[mi]
                escape_fraction = fesc[fi]
            # 16th/50th/84th percentiles of the weighted posteriors.
            m16, m50, m84 = weighted_quantile(Masses.flatten(),
                                              [0.16, 0.5, 0.84],
                                              sample_weight=likelihood.flatten(),
                                              values_sorted=False)
            s16, s50, s84 = weighted_quantile(SFRs.flatten(),
                                              [0.16, 0.5, 0.84],
                                              sample_weight=likelihood.flatten(),
                                              values_sorted=False)
        printlock.acquire()
        MUV_scaled = -99.
        Bestfit_Beta = -99.
        print_string = "{0[0]:6d} {0[1]:8d} {0[2]:>5.2f} " + \
                       "{0[3]:>7.2f} {0[4]:>8.1f} {0[5]:>8.3f} " + \
                       "{0[6]:>5.1f} {0[7]:>8.2f} {0[8]:>4.2f} " + \
                       "{0[9]:>5.2f}"
        print_array = [gal+1, ID[gal], zobs[gal],
                       Bestfit_Mass, chimin,
                       tgs, tvs, taus, mis,
                       Bestfit_SFR]
        print(print_string.format(print_array))
        output_string = '{n} {id} {zobs} {ztemp} {mass_best} {sfr_best} '+ \
                        '{chi_best} {tg} {tvs} {taus} {mis} {fesc} '+ \
                        '{mass_med} {mass_l68} {mass_u68} ' + \
                        '{sfr_med} {sfr_l68} {sfr_u68} ' + \
                        '{nfilts} '
        output_values = {'n': gal+1,
                         'id': ID[gal],
                         'zobs': zobs[gal], 'ztemp':z[j],
                         'mass_best': Bestfit_Mass,
                         'sfr_best': Bestfit_SFR,
                         'chi_best': chimin,
                         'tg': tgs, 'tvs': tvs, 'taus': taus,
                         'mis': mis, 'fesc': escape_fraction,
                         'mass_med': m50, 'mass_l68': m16, 'mass_u68': m84,
                         'sfr_med': s50, 'sfr_l68': s16, 'sfr_u68': s84,
                         'nfilts': len(I)}
        output_array = [gal+1, ID[gal], zobs[gal],
                        Bestfit_Mass, chimin, tgs, tvs, taus, mis,
                        MUV_scaled, minind, Bestfit_SFR, len(I), -99., '\n']
        output = output_string.format(**output_values)
        if include_rest:
            if np.isinf(chimin) or np.isnan(minind):
                M_scaled = np.ones(len(flux_obs)) * -99.
                restframe_output = ' '.join(M_scaled.astype('str'))
                output = output + restframe_output + ' \n'
            else:
                # Rest-frame AB magnitudes of the best-fit scaled template.
                F_rest = np.array(f[0, :, mi, tgi, ti, tvi, fi] *
                                  scale[mi, tgi, ti, tvi, fi] * flux_corr)
                restframeMags = 23.9 - 2.5*np.log10(F_rest)
                restframe_output = ' '.join(restframeMags.astype('str'))
                output = output + restframe_output + ' \n'
        else:
            output = output + ' \n'
        printlock.release()
        printQueue.put([output, mass_hist, sfr_hist])
def galaxyFitPlus(inputQueue, printQueue, printlock):
    """Worker process variant that was meant to also accumulate
    likelihood arrays (mass/MUV/beta PDFs).

    NOTE(review): this function appears stale and is never selected in
    __main__ (only galaxyFit and galaxyFit2 are used). It references
    several names that are undefined in this scope (flux_obs, flux_err,
    flux_models, UV_flux, beta, mass_bins, muv_bins, beta_bins, n_ssp,
    tot, massLikelihoods, Mode_Mass), and indexes the tuple 'mass_range'
    as if it were an array. It would raise NameError/TypeError if called.
    Kept verbatim; consider deleting or rewriting before use.
    """
    for gal in iter(inputQueue.get, 'STOP'):
        mass_range = 7, 13
        log_sfr_min, log_sfr_max = -3, 4
        j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift
        fo = obs[gal,:]
        ferr = obs_err[gal,:]
        # NOTE(review): flux_obs is undefined here (should be 'fo').
        flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero
        #print fo
        I = (ferr > 0.)*(ferr < 1e6) # Find bands with no observation
        fo = flux_obs[I] # and exclude from fit
        ferr = flux_err[I]
        fm = f[I,j,:]
        #print flux_models[:,0,0,0,0]
        top = 0.
        bottom = 0.
        for i in range(len(fo)):
            top += (flux_models[i,:]*flux_obs[i])/(flux_err[i]**2)
            bottom += (flux_models[i,:]**2)/(flux_err[i]**2)
        scale = top/bottom
        scale = np.reshape(scale, (n_metal, n_tg, n_tau, n_tauv, n_fesc))
        chisq = 0.
        for i in range(len(fo)):
            chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(flux_err[i])**2)
        chimin, minind = np.nanmin(chisq), np.nanargmin(chisq)
        # Shift chi-squared so the minimum maps to likelihood exp(-0.5).
        chisq -= (chisq.min() - 1)
        likelihood = np.exp(-0.5*chisq)
        likelihood /= likelihood.sum()
        if np.isinf(chimin) or np.isnan(minind) or len(fo) == 0:
            output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} \
                            {10} {11} {12} {13} {14} {15} {16} {17} {18}'.format(gal+1,ID[gal],zobs[gal],
                                                                                 -99,-99,-99,-99,-99,-99,
                                                                                 -99, -99, -99, -99,-99,-99,-99,
                                                                                 len(I),-99,'\n')
            massLikelihood = np.zeros(mass_bins+1)
            massLikelihood[0] = gal
            muvLikelihood = np.zeros(muv_bins+1)
            muvLikelihood[0] = gal
            betaLikelihood = np.zeros(beta_bins+1)
            betaLikelihood[0] = gal
            #tauLikelihood = np.zeros(n_tau)
            #tauLikelihood = np.insert(tauLikelihood,0,gal)
            printQueue.put([output_string,massLikelihood,muvLikelihood,betaLikelihood])
            continue
        #Find the coordinate of the model with the bestfit mass
        si,tgi,tvi,ti,mi = np.unravel_index(minind,(mass_bins,n_tg,n_tauv,n_tau,n_ssp))
        # NOTE(review): mass_range is a plain tuple (7, 13); indexing it
        # with 'si' from the unravelled grid is almost certainly wrong.
        Bestfit_Mass = np.log10(mass_range[si]*flux_corr)
        Bestfit_SFR = (mass_range[si]*SFR[tgi,ti,mi]*flux_corr)
        Bestfit_Beta = beta[tgi,tvi,ti,mi]
        F_rest = f[:,0]*mass_range[likelihood.argmax(0)]*flux_corr
        restframeMags = 23.9 - 2.5*np.log10(F_rest)
        UV_rest = UV_flux[0]*mass_range[likelihood.argmax(0)]*flux_corr
        restframeMUV = 23.9 - 2.5*np.log10(UV_rest)
        Bestfit_restframeMags = restframeMags[:,tgi,tvi,ti,mi]
        Bestfit_restframeMUV = restframeMUV[tgi,tvi,ti,mi]
        if np.isnan(Bestfit_Mass) or np.isinf(chimin):
            Bestfit_Mass = -99
            #M_scaled[:] = -99
            tgs = -99
            tvs = -99
            taus = -99
            mis = -99
        else:
            tgs = tg[tgi]/1.e9
            tvs = tv[tvi]
            taus = tau[ti]/1.e9
            mis = mi
        """
        Likelihood array section:
        """
        # NOTE(review): truncated line -- 'mass_' is undefined.
        mass_hist = np.histogram(np.log10(mass_))
        printlock.acquire()
        if calc_mode:
            print('{0:4d} {1:6d} {2:>6.2f} {3:>8.1f} {4:>6.2f}'.format(gal+1,ID[gal],Bestfit_Mass,chimin, np.log10(Mode_Mass), '/n'))
        else:
            print('{0:6d} {1:8f} {2:>5.2f} {3:>7.2f} {4:>8.1f} {5:>8.3f} {6:>5.1f} {7:>8.2f} {8:>3d} {9:>5.2f}'.format(gal+1,int(ID[gal]),zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis,np.log10(Bestfit_SFR)))
        output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15}'.format(gal+1,int(ID[gal]),zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis,Bestfit_restframeMags[tot],Bestfit_restframeMUV,minind,Bestfit_SFR,len(I),Bestfit_Beta,'\n')
        printlock.release()
        # NOTE(review): massLikelihoods/muvLikelihoods/betaLikelihoods are
        # never defined on this path.
        printQueue.put([output_string, massLikelihoods, muvLikelihoods, betaLikelihoods])
def getObservations(inputpath):
    """Read the observed photometry catalog.

    Columns ending in the (case-insensitive) suffixes flux_col_end and
    fluxerr_col_end are collected, in catalog order, into 2-D arrays of
    fluxes and flux errors. Uses the module-level globals input_format,
    ID_col, z_col, flux_col_end and fluxerr_col_end.

    Arguments:
    - inputpath: path to the catalog readable by astropy.table.Table

    Returns (ID, zobs, fluxes, fluxerrs, nfilters, filter_names).
    NOTE(review): if only ONE flux column matches, 'fluxes' stays a 1-D
    Column rather than an (ngal, 1) array -- confirm downstream slicing
    copes with that shape.
    """
    input_data = Table.read(inputpath,format=input_format)
    column_names = list(input_data.columns.keys())
    ID = input_data[ID_col]
    zobs = input_data[z_col]
    filter_names = []
    # k counts flux columns found so far, l counts error columns; the
    # first match seeds the array, later ones are stacked as columns.
    k,l = 0,0
    for ii in range(len(column_names)):
        if column_names[ii].lower().endswith(flux_col_end.lower()):
            if k == 0:
                fluxes = input_data[column_names[ii]]
            else:
                fluxes = np.column_stack((fluxes,input_data[column_names[ii]]))
            k+=1
            filter_names.append(column_names[ii])
        if column_names[ii].lower().endswith(fluxerr_col_end.lower()):
            if l == 0:
                fluxerrs = input_data[column_names[ii]]
            else:
                fluxerrs = np.column_stack((fluxerrs,input_data[column_names[ii]]))
            l+=1
    """
    if filts_used != None:
        try:
            fluxes = fluxes[:,filts_used]
            fluxerrs = fluxerrs[:,filts_used]
        except:r
            print('Filter mismatch 1')
            # Array slicing fail
    """
    return ID, zobs, fluxes, fluxerrs, k, filter_names
class _function_wrapper(object):
    """
    This is a hack to make the likelihood function pickleable when ``args``
    or ``kwargs`` are also included.

    Stolen from emcee
    """
    def __init__(self, f, args, kwargs):
        # Bind the callable together with the extra positional and
        # keyword arguments it should always receive.
        self.f = f
        self.args = args
        self.kwargs = kwargs

    def __call__(self, x):
        try:
            return self.f(x, *self.args, **self.kwargs)
        except:
            # Report what was being evaluated before re-raising, so the
            # failure is debuggable from a worker process.
            import traceback
            print("emcee: Exception while calling your likelihood function:")
            for label, value in (("  params:", x),
                                 ("  args:", self.args),
                                 ("  kwargs:", self.kwargs)):
                print(label, value)
            print("  exception:")
            traceback.print_exc()
            raise
def weighted_quantile(values, quantiles, sample_weight=None, values_sorted=False, old_style=False):
    """Compute quantiles of *values* with optional per-sample weights.

    Very close to np.percentile, except quantiles are expressed in the
    range [0, 1] and each sample may carry a weight.

    :param values: np.array with data
    :param quantiles: array-like with many quantiles needed, each in [0, 1]
    :param sample_weight: array-like of the same length as `values`
    :param values_sorted: bool, if True, skip sorting of the input array
    :param old_style: if True, rescale the weighted CDF so results are
        consistent with np.percentile.
    :return: np.array with computed quantiles.
    """
    values = np.array(values)
    quantiles = np.array(quantiles)
    sample_weight = (np.ones(len(values)) if sample_weight is None
                     else np.array(sample_weight))
    assert np.all(quantiles >= 0) and np.all(quantiles <= 1), 'quantiles should be in [0, 1]'
    if not values_sorted:
        order = np.argsort(values)
        values = values[order]
        sample_weight = sample_weight[order]
    # Weighted empirical CDF evaluated at the sample midpoints.
    cdf = np.cumsum(sample_weight) - 0.5 * sample_weight
    if old_style:
        # To be convenient with np.percentile: stretch the CDF to [0, 1].
        cdf = (cdf - cdf[0]) / (cdf[-1] - cdf[0])
    else:
        cdf = cdf / np.sum(sample_weight)
    return np.interp(quantiles, cdf, values)
if __name__ == '__main__':
    # Redirect stderr into a log file for the duration of the run.
    logfile = open("error.log", "w")
    original_stderr = sys.stderr
    sys.stderr = logfile
    start = time.time()
    """
    SECTION 1
    Unpack the parameter file and read the observations.
    """
    model_path = params.model_path
    input_catalog = params.input_catalog
    input_format = params.input_format
    z_col = params.z_col
    ID_col = params.ID_col
    flux_col_end = params.flux_col_end
    fluxerr_col_end = params.fluxerr_col_end
    ncpus = params.ncpus
    filts_used = params.filts_used
    include_rest = params.include_rest
    output_path = params.output_catalog_path
    output_format = params.output_format
    output_hdf_path = params.output_hdf_path
    calc_mode = params.fitting_mode
    flux_corr = params.flux_corr
    ID, zobs, obs, obs_err, filters_found, filter_names = getObservations(input_catalog)
    """
    Section 2
    Load the synthetic model grid.
    """
    print("Loading synthetic mags and mass array:")
    models = h5py.File(model_path, 'r')
    # NOTE(review): Dataset.value is deprecated (removed in h5py 3.x);
    # migrate to models['ages'][()] etc. when upgrading h5py.
    tg = models['ages'].value
    tv = models['dust'].value
    tau = models['sfh'].value
    metallicities = models['metallicities'].value
    fesc = models['fesc'].value
    Mshape = models['fluxes'].shape
    z = models['z']
    # Grid dimensions: (z, filter, metallicity, age, tau, tauv, fesc).
    nfilts = Mshape[1]
    n_metal = Mshape[2]
    n_tg = Mshape[3]
    n_tau = Mshape[4]
    n_tauv = Mshape[5]
    n_fesc = Mshape[6]
    #UV_flux = synmags['UV_flux']
    SFR = models['SFR']
    Ms = models['Ms']
    # Idiom fix: compare to None with 'is'/'is not' rather than ==/!=.
    if (nfilts == filters_found) and (filts_used is None):
        f = models['fluxes']
    elif filts_used is not None:
        try:
            f = models['fluxes'][:,filts_used]
            obs = obs[:,filts_used]
            obs_err = obs_err[:,filts_used]
            filter_names = np.array(filter_names)[filts_used]
        except:
            print('Mis-match between model and observed filter numbers')
            raise
            # Slice fail
    print ("Done.")
    """
    SECTION 3
    Prepare the temporary output file.
    """
    if os.path.isfile(output_path+".temp_output.txt"):
        os.remove(output_path+".temp_output.txt")
    temp_file = open(output_path+".temp_output.txt","w")
    """
    SECTION 4
    Chi-sq calculation
    """
    out_string = '{0:6s} {1:8s} {2:>5s} {3:>7s} {4:>8s} {5:>8s}' + \
                 '{6:>5s} {7:>8s} {8:>4s} {9:>5s}'
    print(out_string.format('N','ID','zobs','Best', 'chimin',
                            'tg', 'tauv','tau','met', 'sfr'))
    loop_start = time.time()
    ncpus = np.clip(ncpus, 1, multiprocessing.cpu_count())
    inputQueue = multiprocessing.Queue()
    printQueue = multiprocessing.Queue()
    printlock = multiprocessing.Lock()
    # 'hist' mode also stores mass/SFR PDFs in an HDF5 file.
    if calc_mode == 'hist':
        output_hdf = h5py.File(output_hdf_path, 'w')
        output_hdf.create_dataset("mass_pdf", (len(ID), 120), dtype="f")
        output_hdf.create_dataset("sfr_pdf", (len(ID), 140), dtype="f")
        fitFunction = galaxyFit2
    else:
        fitFunction = galaxyFit
    for i in range( ncpus ):
        multiprocessing.Process(target = fitFunction,
                                args = (inputQueue, printQueue,
                                        printlock)).start()
    # Put elements in the send queue for processing
    for gal in range( len(ID) ):
        inputQueue.put( gal )
    if calc_mode == 'hist':
        for i, gal in enumerate(ID):
            printout, mass_hist, sfr_hist = printQueue.get()
            if i == 0:
                # Bin centres are identical for every galaxy; store once.
                mass_centers = 0.5*(mass_hist[1][1:] + mass_hist[1][:-1])
                sfr_centers = 0.5*(sfr_hist[1][1:] + sfr_hist[1][:-1])
                output_hdf.create_dataset("mass_bins", data = mass_centers)
                output_hdf.create_dataset("sfr_bins", data = sfr_centers)
            output_hdf["mass_pdf"][i] = mass_hist[0]
            output_hdf["sfr_pdf"][i] = sfr_hist[0]
            temp_file.write( printout )
            #tau_array.tofile(tau_file)
    else:
        for i, gal in enumerate(ID):
            printout = printQueue.get()
            temp_file.write( printout )
            #print len(mass_array), len(muv_array), len(beta_array)
    # Stop all the running processes
    for i in range( ncpus ):
        inputQueue.put( 'STOP' )
    # Close both send and receive queues
    inputQueue.close()
    printQueue.close()
    temp_file.close()
    models.close()
    # Bug fix: output_hdf only exists in 'hist' mode; closing it
    # unconditionally raised NameError for every other fitting mode.
    if calc_mode == 'hist':
        output_hdf.close()
    print("Fitting time taken: {0:.2f} {1}".format(time.time()-loop_start,
                                                   '\n'))
    """
    Section 3
    Reload, format and save output table
    """
    # Bug fix: was `pause(0.1)` -- an undefined name. The loop is normally
    # a no-op because temp_file was closed above, but if it ever spins we
    # now sleep instead of crashing.
    while not temp_file.closed:
        time.sleep(0.1)
    data = np.loadtxt(output_path+".temp_output.txt")
    try:
        rows, cols = data.shape
    except:
        # Single-row catalogs give a 1-D array.
        # NOTE(review): the column loop below still indexes data[:,col],
        # which fails for 1-D data -- confirm single-object catalogs work.
        cols = len(data)
    output = Table()
    # NOTE(review): this column layout matches galaxyFit2 ('hist' mode);
    # galaxyFit writes a different set of columns -- verify before using
    # the ascii catalog from non-hist runs.
    names = ['N', 'ID', 'z', 'zmodel',
             'Mass_best', 'SFR_best', 'chi_best',
             'Age_best','Dust_best', 'SFH_best',
             'Metallicity_best', 'fesc_best',
             'Mass_median', 'Mass_l68', 'Mass_u68',
             'SFR_median', 'SFR_l68', 'SFR_u68',
             'Nfilts']
    units = [None, None, None, None,
             u.Msun, u.Msun/u.yr, None,
             u.Gyr, None, None,
             None, None,
             u.Msun, u.Msun, u.Msun,
             u.Msun/u.yr, u.Msun/u.yr, u.Msun/u.yr,
             None]
    types = ['i4', 'i4', 'f4', 'f4',
             'f4', 'f4', 'f4',
             'f4', 'f4', 'f4',
             'f4', 'f4',
             'f4', 'f4', 'f4',
             'f4', 'f4', 'f4',
             'i4']
    if include_rest:
        # Append one rest-frame magnitude column per observed filter.
        for name in filter_names:
            names.append(name[:-len(flux_col_end)]+'_rest')
            units.append(u.mag)
            types.append('f4')
    for col in range(cols):
        column = Column( data[:,col], name = names[col], unit=units[col], dtype=types[col])
        output.add_column(column)
    table_format = 'ascii.commented_header'
    output.sort('ID')
    if os.path.isfile(output_path):
        os.remove(output_path)
    output.write(output_path,format=table_format)
    print('Catalog saved')
    # Remove the temporary scratch file now that the table is written.
    os.remove(temp_file.name)
    print()
    print("Total time taken: "+str(time.time()-start))
    # Restore stderr and close the error log.
    sys.stderr = original_stderr
    logfile.close()
| dunkenj/smpy | scripts/data/fitting.py | Python | mit | 27,412 | [
"Galaxy"
] | ee272494b27c37b581c56d670d69ac4c3d62ffcfe2bef74499adf8b1e3ee90e2 |
# Welcome script to be run asynchronously.
# This script is executed when the GaiaSandbox is first started.
# Created by Toni Sagrista
from gaia.cu9.ari.gaiaorbit.script import EventScriptingInterface
# Obtain the scripting interface singleton used to drive the application.
gs = EventScriptingInterface.instance()
# Disable input while the scripted tour runs, stop any camera motion and
# hide the UI so only the messages are visible.
gs.disableInput()
gs.cameraStop()
gs.minimizeInterfaceWindow()
# Welcome
gs.setHeadlineMessage("Welcome to the Gaia Sky")
gs.setSubheadMessage("Explore Gaia, the Solar System and the whole Galaxy!")
gs.sleep(2.5)
# Earth
gs.setHeadlineMessage("Earth")
gs.setSubheadMessage("This is our planet, the Earth")
gs.setCameraFocus("Earth")
gs.sleep(3.5)
# Sun
gs.setHeadlineMessage("Sun")
gs.setSubheadMessage("This is our star, the Sun")
gs.setCameraFocus("Sol")
gs.sleep(3.5)
# Back to Earth
gs.setCameraFocus("Earth")
# Maximize interface and enable input
gs.clearAllMessages()
gs.maximizeInterfaceWindow()
gs.enableInput()
| vga101/gaiasky | assets/scripts/welcome.py | Python | mpl-2.0 | 888 | [
"Galaxy"
] | 0f590354422e42628db8f2ffa331126b73ed442a9b8b7330ac3a7e42cdd28b27 |
import numpy as np
from mobility import mobility as mob
try:
from pyevtk.hl import gridToVTK
except ImportError:
pass
# Try to import the visit_writer (boost implementation)
try:
# import visit.visit_writer as visit_writer
from visit import visit_writer as visit_writer
except ImportError as e:
print(e)
pass
def plot_velocity_field(grid, r_vectors_blobs, lambda_blobs, blob_radius, eta, output, tracer_radius, *args, **kwargs):
  '''
  This function plots the velocity field to a grid using boost visit writer.

  grid is a flat length-9 sequence interpreted (after reshape/transpose) as
  three rows: lower corner, upper corner and number of points per axis.
  The velocity induced by the blob forces lambda_blobs is evaluated at the
  cell centres with the mobility implementation selected by the
  'mobility_vector_prod_implementation' kwarg, and written to
  '<output>.velocity_field.vtk' as a rectilinear mesh.
  '''
  # Prepare grid values
  grid = np.reshape(grid, (3,3)).T
  grid_length = grid[1] - grid[0]
  grid_points = np.array(grid[2], dtype=np.int32)
  num_points = grid_points[0] * grid_points[1] * grid_points[2]
  # Set grid coordinates (cell centres: offset half a cell from the corner)
  dx_grid = grid_length / grid_points
  grid_x = np.array([grid[0,0] + dx_grid[0] * (x+0.5) for x in range(grid_points[0])])
  grid_y = np.array([grid[0,1] + dx_grid[1] * (x+0.5) for x in range(grid_points[1])])
  grid_z = np.array([grid[0,2] + dx_grid[2] * (x+0.5) for x in range(grid_points[2])])
  # Be aware, x is the fast axis.
  zz, yy, xx = np.meshgrid(grid_z, grid_y, grid_x, indexing = 'ij')
  grid_coor = np.zeros((num_points, 3))
  grid_coor[:,0] = np.reshape(xx, xx.size)
  grid_coor[:,1] = np.reshape(yy, yy.size)
  grid_coor[:,2] = np.reshape(zz, zz.size)
  # Set radius of blobs (= a) and grid nodes (= 0)
  radius_source = np.ones(r_vectors_blobs.size // 3) * blob_radius
  radius_target = np.ones(grid_coor.size // 3) * tracer_radius
  # Compute velocity field
  mobility_vector_prod_implementation = kwargs.get('mobility_vector_prod_implementation')
  print('mobility_vector_prod_implementation = ', mobility_vector_prod_implementation)
  if mobility_vector_prod_implementation == 'python':
    grid_velocity = mob.mobility_vector_product_source_target_one_wall(r_vectors_blobs,
                                                                       grid_coor,
                                                                       lambda_blobs,
                                                                       radius_source,
                                                                       radius_target,
                                                                       eta,
                                                                       *args,
                                                                       **kwargs)
  elif mobility_vector_prod_implementation == 'C++':
    grid_velocity = mob.boosted_mobility_vector_product_source_target(r_vectors_blobs,
                                                                      grid_coor,
                                                                      lambda_blobs,
                                                                      radius_source,
                                                                      radius_target,
                                                                      eta,
                                                                      *args,
                                                                      **kwargs)
  elif mobility_vector_prod_implementation == 'numba_no_wall':
    grid_velocity = mob.no_wall_mobility_trans_times_force_source_target_numba(r_vectors_blobs,
                                                                               grid_coor,
                                                                               lambda_blobs,
                                                                               radius_source,
                                                                               radius_target,
                                                                               eta,
                                                                               *args,
                                                                               **kwargs)
  else:
    # Default: pycuda single-wall implementation.
    grid_velocity = mob.single_wall_mobility_trans_times_force_source_target_pycuda(r_vectors_blobs,
                                                                                    grid_coor,
                                                                                    lambda_blobs,
                                                                                    radius_source,
                                                                                    radius_target,
                                                                                    eta,
                                                                                    *args,
                                                                                    **kwargs)
  # Prepara data for VTK writer
  variables = [np.reshape(grid_velocity, grid_velocity.size)]
  # Rectilinear meshes are defined by node coordinates, hence +1 per axis.
  dims = np.array([grid_points[0]+1, grid_points[1]+1, grid_points[2]+1], dtype=np.int32)
  nvars = 1
  vardims = np.array([3])
  centering = np.array([0])
  varnames = ['velocity\0']
  name = output + '.velocity_field.vtk'
  # Shift cell centres back to node positions and append the upper corner.
  grid_x = grid_x - dx_grid[0] * 0.5
  grid_y = grid_y - dx_grid[1] * 0.5
  grid_z = grid_z - dx_grid[2] * 0.5
  grid_x = np.concatenate([grid_x, [grid[1,0]]])
  grid_y = np.concatenate([grid_y, [grid[1,1]]])
  grid_z = np.concatenate([grid_z, [grid[1,2]]])
  # Write velocity field
  visit_writer.boost_write_rectilinear_mesh(name,      # File's name
                                            0,         # 0=ASCII,  1=Binary
                                            dims,      # {mx, my, mz}
                                            grid_x,    # xmesh
                                            grid_y,    # ymesh
                                            grid_z,    # zmesh
                                            nvars,     # Number of variables
                                            vardims,   # Size of each variable, 1=scalar, velocity=3*scalars
                                            centering, # Write to cell centers of corners
                                            varnames,  # Variables' names
                                            variables) # Variables
  return
def plot_velocity_field_pyVTK(grid, r_vectors_blobs, lambda_blobs, blob_radius, eta, output, tracer_radius, *args, **kwargs):
  '''
  This function plots the velocity field to a grid using pyevtk.

  Same computation as plot_velocity_field, but the result is written with
  pyevtk's gridToVTK as cell data to '<output>.vtk'. Note the meshgrid here
  uses (x, y, z) 'ij' ordering, matching the (nx, ny, nz) reshapes below.
  '''
  # Prepare grid values
  grid = np.reshape(grid, (3,3)).T
  grid_length = grid[1] - grid[0]
  grid_points = np.array(grid[2], dtype=np.int32)
  num_points = grid_points[0] * grid_points[1] * grid_points[2]
  # Set grid coordinates (cell centres)
  dx_grid = grid_length / grid_points
  grid_x = np.array([grid[0,0] + dx_grid[0] * (x+0.5) for x in range(grid_points[0])])
  grid_y = np.array([grid[0,1] + dx_grid[1] * (x+0.5) for x in range(grid_points[1])])
  grid_z = np.array([grid[0,2] + dx_grid[2] * (x+0.5) for x in range(grid_points[2])])
  xx, yy, zz = np.meshgrid(grid_x, grid_y, grid_z, indexing = 'ij')
  grid_coor = np.zeros((num_points, 3))
  grid_coor[:,0] = np.reshape(xx, xx.size)
  grid_coor[:,1] = np.reshape(yy, yy.size)
  grid_coor[:,2] = np.reshape(zz, zz.size)
  # Set radius of blobs (= a) and grid nodes (= 0)
  radius_source = np.ones(r_vectors_blobs.size // 3) * blob_radius
  radius_target = np.ones(grid_coor.size // 3) * tracer_radius
  # Compute velocity field
  mobility_vector_prod_implementation = kwargs.get('mobility_vector_prod_implementation')
  print('mobility_vector_prod_implementation = ', mobility_vector_prod_implementation)
  if mobility_vector_prod_implementation == 'python':
    grid_velocity = mob.mobility_vector_product_source_target_one_wall(r_vectors_blobs,
                                                                       grid_coor,
                                                                       lambda_blobs,
                                                                       radius_source,
                                                                       radius_target,
                                                                       eta,
                                                                       *args,
                                                                       **kwargs)
  elif mobility_vector_prod_implementation == 'C++':
    grid_velocity = mob.boosted_mobility_vector_product_source_target(r_vectors_blobs,
                                                                      grid_coor,
                                                                      lambda_blobs,
                                                                      radius_source,
                                                                      radius_target,
                                                                      eta,
                                                                      *args,
                                                                      **kwargs)
  elif mobility_vector_prod_implementation == 'numba_no_wall':
    grid_velocity = mob.no_wall_mobility_trans_times_force_source_target_numba(r_vectors_blobs,
                                                                               grid_coor,
                                                                               lambda_blobs,
                                                                               radius_source,
                                                                               radius_target,
                                                                               eta,
                                                                               *args,
                                                                               **kwargs)
  else:
    # Default: pycuda single-wall implementation.
    grid_velocity = mob.single_wall_mobility_trans_times_force_source_target_pycuda(r_vectors_blobs,
                                                                                    grid_coor,
                                                                                    lambda_blobs,
                                                                                    radius_source,
                                                                                    radius_target,
                                                                                    eta,
                                                                                    *args,
                                                                                    **kwargs)
  # Prepara data for VTK writer
  name = output + '.vtk'
  print(name)
  # Split the flat velocity array into contiguous per-component 3-D blocks.
  grid_velocity = np.reshape(grid_velocity,(num_points,3))
  velx = np.ascontiguousarray(np.reshape(grid_velocity[:,0],(grid_points[0], grid_points[1], grid_points[2])))
  vely = np.ascontiguousarray(np.reshape(grid_velocity[:,1],(grid_points[0], grid_points[1], grid_points[2])))
  velz = np.ascontiguousarray(np.reshape(grid_velocity[:,2],(grid_points[0], grid_points[1], grid_points[2])))
  # Shift cell centres back to node positions and append the upper corner.
  grid_x = grid_x - dx_grid[0] * 0.5
  grid_y = grid_y - dx_grid[1] * 0.5
  grid_z = grid_z - dx_grid[2] * 0.5
  grid_x = np.ascontiguousarray(np.concatenate([grid_x, [grid[1,0]]]))
  grid_y = np.ascontiguousarray(np.concatenate([grid_y, [grid[1,1]]]))
  grid_z = np.ascontiguousarray(np.concatenate([grid_z, [grid[1,2]]]))
  gridToVTK(name,
            grid_x,
            grid_y,
            grid_z,
            cellData = {"velocity" : (velx, vely, velz)})
  return
| stochasticHydroTools/RotationalDiffusion | plot/plot_velocity_field.py | Python | gpl-3.0 | 11,514 | [
"VTK",
"VisIt"
] | 14e4b6745cb958f462fc4ecb8fee0dc68b51b3a831ef7993ce5fae47591fe604 |
from .base import BaseChildResultsVisitor
from copy import copy
from nineml.exceptions import NineMLNotBoundException, NineMLUsageError
class Cloner(BaseChildResultsVisitor):
    """
    A Cloner visitor that visits any NineML object (except Documents) and
    creates a copy of the object.

    :param as_class: class that temporary objects are expected to be visited
        as (checked by an assertion in ``visit``); defaults to ``type(None)``.
    :param exclude_annotations: if True, annotations of visited objects are
        not copied onto the clones.
    :param clone_definitions: 'all', 'local' or None. Controls whether the
        targets of Definition objects are cloned ('local' restricts cloning
        to targets whose document is *document*). Defaults to 'local' when a
        document is given, otherwise 'all'.
    :param document: document used to decide which definitions are 'local';
        required when clone_definitions == 'local'.
    :param random_seeds: if True, the seed of cloned connectivities is
        preserved; otherwise clones get no seed.
    :param validate: when False, 'validate=False' is passed to constructors
        that support it.
    """
    def __init__(self, as_class=None, exclude_annotations=False,
                 clone_definitions=None, document=None,
                 random_seeds=False, validate=True, **kwargs):  # @UnusedVariable @IgnorePep8
        super(Cloner, self).__init__()
        self.as_class = as_class if as_class is not None else type(None)
        self.validate = validate
        # Cache mapping id() of already-visited objects to their clones;
        # terminates cloning of circular references.
        self.memo = {}
        self.exclude_annotations = exclude_annotations
        self.document = document
        if clone_definitions is None:
            if document is not None:
                clone_definitions = 'local'
            else:
                clone_definitions = 'all'
        elif clone_definitions == 'local' and document is None:
            raise NineMLUsageError(
                "'document' kwarg must be provided if clone_definitions is "
                " set to 'local'")
        self.clone_definitions = clone_definitions
        self.random_seeds = random_seeds
        # Clones created by action_definition are collected here.
        # NOTE(review): presumably used by callers for post-clone re-binding
        # of references — confirm against call sites.
        self.refs = []

    def clone(self, obj, **kwargs):
        """Returns a clone of *obj* (alias for ``visit``)."""
        return self.visit(obj, **kwargs)

    def visit(self, obj, nineml_cls=None, **kwargs):
        """
        Before using the inherited visit method, the 'memo' cache is checked
        for previously cloned objects by this cloner. This avoids problems
        with circular references.

        NB: Temporary objects generated when flattening a MultiDynamics object
        (e.g. _NamespaceObject, _MultiRegime, MultiTransition), which can't
        be referenced by their memory position as the memory is freed after
        they go out of scope, are not saved in the memo.
        """
        if obj.temporary:
            assert nineml_cls is not None or isinstance(obj, self.as_class)
            # Temporary objects are never memoised (see docstring above).
            id_ = None
        else:
            id_ = id(obj)
        try:
            # See if the attribute has already been cloned in memo
            clone = self.memo[id_]
        except KeyError:
            clone = super(Cloner, self).visit(obj, nineml_cls=nineml_cls,
                                              **kwargs)
            # Clone annotations if they are present
            if (hasattr(obj, 'annotations') and not self.exclude_annotations):
                clone._annotations = self.visit(obj.annotations, **kwargs)
            if not obj.temporary:
                self.memo[id_] = clone
        return clone

    def default_action(self, obj, nineml_cls, child_results,
                       children_results, **kwargs):  # @UnusedVariable @IgnorePep8
        """
        Generic clone: reconstructs *obj* by calling its class with the
        object's own attributes plus the already-cloned child results.
        """
        init_args = {}
        for attr_name in nineml_cls.nineml_attr:
            try:
                init_args[attr_name] = getattr(obj, attr_name)
            except NineMLNotBoundException:
                init_args[attr_name] = None
        for child_name in nineml_cls.nineml_child:
            try:
                init_args[child_name] = child_results[child_name]
            except KeyError:
                init_args[child_name] = None
        for child_type in nineml_cls.nineml_children:
            init_args[child_type._children_iter_name()] = children_results[
                child_type]
        # Skip validation on construction when requested and supported.
        if hasattr(nineml_cls, 'validate') and not self.validate:
            init_args['validate'] = False
        return nineml_cls(**init_args)

    def action_definition(self, definition, nineml_cls, child_results,
                          children_results, **kwargs):  # @UnusedVariable
        """
        Clones a Definition. The target is cloned only when permitted by the
        'clone_definitions' policy ('all', or 'local' with the target's
        document being self.document); otherwise the original target is
        reused.
        """
        if self.clone_definitions == 'all' or (
                self.clone_definitions == 'local' and
                definition._target.document is self.document):
            target = child_results['target']
        else:
            target = definition.target
        clone = nineml_cls(target=target)
        self.refs.append(clone)
        return clone

    def action__connectivity(self, connectivity, nineml_cls, child_results,
                             children_results, **kwargs):  # @UnusedVariable
        """Clones a connectivity, optionally preserving its random seed."""
        if self.random_seeds:
            random_seed = connectivity._seed
        else:
            random_seed = None
        clone = nineml_cls(
            child_results['rule_properties'],
            random_seed=random_seed,
            source_size=connectivity.source_size,
            destination_size=connectivity.destination_size,
            **kwargs)
        return clone

    def action_reference(self, reference, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        """
        Typically won't be called unless Reference is created and referenced
        explicitly as the referenced object themselves is typically referred
        to in the containing container.
        """
        return copy(reference)
| INCF/lib9ML | nineml/visitors/cloner.py | Python | bsd-3-clause | 4,926 | [
"VisIt"
] | 6ada0cd743802b296e3c7d3f591283a839117abb261e27c52099803550759844 |
# -*- coding: utf-8 -*-
#
# Copyright (c), 2016-2019, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module contains classes and functions for processing XSD content models.
"""
from __future__ import unicode_literals
from collections import Counter
from ..compat import PY3, MutableSequence
from ..exceptions import XMLSchemaValueError
from .exceptions import XMLSchemaModelError, XMLSchemaModelDepthError
from .xsdbase import ParticleMixin
MAX_MODEL_DEPTH = 15
"""Limit depth for safe visiting of models"""
XSD_GROUP_MODELS = {'sequence', 'choice', 'all'}
class ModelGroup(MutableSequence, ParticleMixin):
    """
    Class for XSD model group particles. This class implements only model related methods,
    schema element parsing and validation methods are implemented in derived classes.

    :param model: the group's compositor, one of 'sequence', 'choice' or 'all'.
    """
    def __init__(self, model):
        assert model in XSD_GROUP_MODELS, "Not a valid value for 'model'"
        self._group = []
        self.model = model

    def __repr__(self):
        return '%s(model=%r, occurs=%r)' % (self.__class__.__name__, self.model, self.occurs)

    # Implements the abstract methods of MutableSequence
    def __getitem__(self, i):
        return self._group[i]

    def __setitem__(self, i, item):
        assert isinstance(item, (tuple, ParticleMixin)), "Items must be tuples or XSD particles"
        self._group[i] = item

    def __delitem__(self, i):
        del self._group[i]

    def __len__(self):
        return len(self._group)

    def insert(self, i, item):
        assert isinstance(item, (tuple, ParticleMixin)), "Items must be tuples or XSD particles"
        self._group.insert(i, item)

    def __setattr__(self, name, value):
        # Guard 'model' and '_group' assignments: the compositor can only be
        # changed from 'all', and group items must be tuples or particles.
        if name == 'model' and value is not None:
            if value not in XSD_GROUP_MODELS:
                raise XMLSchemaValueError("invalid model group %r." % value)
            if self.model is not None and value != self.model and self.model != 'all':
                raise XMLSchemaValueError("cannot change group model from %r to %r" % (self.model, value))
        elif name == '_group':
            if not all(isinstance(item, (tuple, ParticleMixin)) for item in value):
                raise XMLSchemaValueError("XsdGroup's items must be tuples or ParticleMixin instances.")
        super(ModelGroup, self).__setattr__(name, value)

    def clear(self):
        """Removes all the items of the group."""
        del self._group[:]

    def is_emptiable(self):
        """Returns `True` if the group can validate an empty content."""
        # Idiom fix: use generator expressions instead of any([...])/all([...])
        # so no throwaway list is built and evaluation short-circuits.
        if self.model == 'choice':
            return self.min_occurs == 0 or not self or any(item.is_emptiable() for item in self)
        else:
            return self.min_occurs == 0 or not self or all(item.is_emptiable() for item in self)

    def is_empty(self):
        """Returns `True` if the group has no items or cannot occur."""
        return not self._group or self.max_occurs == 0

    def is_pointless(self, parent):
        """
        Returns `True` if the group may be eliminated without affecting the model, `False` otherwise.
        A group is pointless if one of those conditions is verified:

        - the group is empty
        - minOccurs == maxOccurs == 1 and the group has one child
        - minOccurs == maxOccurs == 1 and the group and its parent have a sequence model
        - minOccurs == maxOccurs == 1 and the group and its parent have a choice model

        Ref: https://www.w3.org/TR/2004/REC-xmlschema-1-20041028/#coss-particle

        :param parent: effective parent of the model group.
        """
        if not self:
            return True
        elif self.min_occurs != 1 or self.max_occurs != 1:
            return False
        elif len(self) == 1:
            return True
        elif not isinstance(parent, ModelGroup):
            return False
        elif self.model == 'sequence' and parent.model != 'sequence':
            return False
        elif self.model == 'choice' and parent.model != 'choice':
            return False
        else:
            return True

    def has_occurs_restriction(self, other):
        """
        Checks if the effective occurrences of the group are a restriction of
        the occurrences of *other* (a group or a single particle).
        """
        if not self:
            return True
        elif isinstance(other, ModelGroup):
            return super(ModelGroup, self).has_occurs_restriction(other)

        # Group particle compared to element particle
        if self.max_occurs is None or any(e.max_occurs is None for e in self):
            if other.max_occurs is not None:
                return False
            elif self.model == 'choice':
                return self.min_occurs * min(e.min_occurs for e in self) >= other.min_occurs
            else:
                return self.min_occurs * sum(e.min_occurs for e in self) >= other.min_occurs
        elif self.model == 'choice':
            if self.min_occurs * min(e.min_occurs for e in self) < other.min_occurs:
                return False
            elif other.max_occurs is None:
                return True
            else:
                return self.max_occurs * max(e.max_occurs for e in self) <= other.max_occurs
        else:
            if self.min_occurs * sum(e.min_occurs for e in self) < other.min_occurs:
                return False
            elif other.max_occurs is None:
                return True
            else:
                return self.max_occurs * sum(e.max_occurs for e in self) <= other.max_occurs

    def iter_model(self, depth=0):
        """
        A generator function iterating elements and groups of a model group. Skips pointless groups,
        iterating deeper through them. Raises `XMLSchemaModelDepthError` if the argument *depth* is
        over `MAX_MODEL_DEPTH` value.

        :param depth: guard for protect model nesting bombs, incremented at each deepest recursion.
        """
        if depth > MAX_MODEL_DEPTH:
            raise XMLSchemaModelDepthError(self)
        for item in self:
            if not isinstance(item, ModelGroup):
                yield item
            elif not item.is_pointless(parent=self):
                yield item
            else:
                for obj in item.iter_model(depth + 1):
                    yield obj

    def iter_elements(self, depth=0):
        """
        A generator function iterating model's elements. Raises `XMLSchemaModelDepthError` if the
        argument *depth* is over `MAX_MODEL_DEPTH` value.

        :param depth: guard for protect model nesting bombs, incremented at each deepest recursion.
        """
        if depth > MAX_MODEL_DEPTH:
            raise XMLSchemaModelDepthError(self)
        for item in self:
            if isinstance(item, ModelGroup):
                for e in item.iter_elements(depth + 1):
                    yield e
            else:
                yield item

    def iter_subelements(self, depth=0):
        """Iterates model's elements, silently truncating at MAX_MODEL_DEPTH."""
        if depth <= MAX_MODEL_DEPTH:
            for item in self:
                if isinstance(item, ModelGroup):
                    for e in item.iter_subelements(depth + 1):
                        yield e
                else:
                    yield item

    def check_model(self):
        """
        Checks if the model group is deterministic. Types matching of same elements and Unique Particle
        Attribution Constraint are checked. Raises an `XMLSchemaModelError` at first violated constraint.
        """
        def safe_iter_path(group, depth):
            # Depth-first iteration of leaf particles; 'current_path' (a
            # closure variable) tracks the chain of ancestor groups.
            if depth > MAX_MODEL_DEPTH:
                raise XMLSchemaModelDepthError(group)
            for item in group:
                if isinstance(item, ModelGroup):
                    current_path.append(item)
                    for _item in safe_iter_path(item, depth + 1):
                        yield _item
                    current_path.pop()
                else:
                    yield item

        paths = {}
        current_path = [self]
        for e in safe_iter_path(self, 0):
            for pe, previous_path in paths.values():
                # Same-named elements must share the same type.
                if pe.name == e.name and pe.name is not None and pe.type is not e.type:
                    raise XMLSchemaModelError(
                        self, "The model has elements with the same name %r but a different type" % e.name
                    )
                elif not pe.overlap(e):
                    continue
                elif pe is not e and pe.parent is e.parent:
                    if pe.parent.model in {'all', 'choice'}:
                        msg = "{!r} and {!r} overlap and are in the same {!r} group"
                        raise XMLSchemaModelError(self, msg.format(pe, e, pe.parent.model))
                    elif pe.min_occurs == pe.max_occurs:
                        continue

                if not distinguishable_paths(previous_path + [pe], current_path + [e]):
                    raise XMLSchemaModelError(
                        self, "Unique Particle Attribution violation between {!r} and {!r}".format(pe, e)
                    )
            paths[e.name] = e, current_path[:]
def distinguishable_paths(path1, path2):
    """
    Checks if two model paths are distinguishable in a deterministic way, without looking forward
    or backtracking. The arguments are lists containing paths from the base group of the model to
    a couple of leaf elements. Returns `True` if there is a deterministic separation between paths,
    `False` if the paths are ambiguous.
    """
    e1, e2 = path1[-1], path2[-1]
    # Find the index of the deepest group the two paths have in common
    # (the divergence point).
    for k, e in enumerate(path1):
        if e not in path2:
            depth = k - 1
            break
    else:
        depth = 0
    if path1[depth].max_occurs == 0:
        # The common group cannot occur at all, so there is no ambiguity.
        return True
    univocal1 = univocal2 = True
    if path1[depth].model == 'sequence':
        # NOTE: path1[depth] and path2[depth] are the same group (the
        # divergence point), so indexing path1[depth] with idx2 is intended.
        idx1 = path1[depth].index(path1[depth + 1])
        idx2 = path2[depth].index(path2[depth + 1])
        # Flags: a non-emptiable particle exists before/after each branch
        # inside the common sequence group.
        before1 = any(not e.is_emptiable() for e in path1[depth][:idx1])
        after1 = before2 = any(not e.is_emptiable() for e in path1[depth][idx1 + 1:idx2])
        after2 = any(not e.is_emptiable() for e in path1[depth][idx2 + 1:])
    else:
        before1 = after1 = before2 = after2 = False
    # Accumulate univocity and before/after flags over the intermediate
    # groups of each path below the divergence point.
    for k in range(depth + 1, len(path1) - 1):
        univocal1 &= path1[k].is_univocal()
        if path1[k].model == 'sequence':
            idx = path1[k].index(path1[k + 1])
            before1 |= any(not e.is_emptiable() for e in path1[k][:idx])
            after1 |= any(not e.is_emptiable() for e in path1[k][idx + 1:])
    for k in range(depth + 1, len(path2) - 1):
        univocal2 &= path2[k].is_univocal()
        if path2[k].model == 'sequence':
            idx = path2[k].index(path2[k + 1])
            before2 |= any(not e.is_emptiable() for e in path2[k][:idx])
            after2 |= any(not e.is_emptiable() for e in path2[k][idx + 1:])
    if path1[depth].model != 'sequence':
        return before1 and before2 or \
            (before1 and (univocal1 and e1.is_univocal() or after1 or path1[depth].max_occurs == 1)) or \
            (before2 and (univocal2 and e2.is_univocal() or after2 or path2[depth].max_occurs == 1))
    elif path1[depth].max_occurs == 1:
        return before2 or (before1 or univocal1) and (e1.is_univocal() or after1)
    else:
        return (before2 or (before1 or univocal1) and (e1.is_univocal() or after1)) and \
            (before1 or (before2 or univocal2) and (e2.is_univocal() or after2))
class ModelVisitor(MutableSequence):
    """
    A visitor design pattern class that can be used for validating XML data related to an XSD
    model group. The visit of the model is done using an external match information,
    counting the occurrences and yielding tuples in case of model's item occurrence errors.
    Ends setting the current element to `None`.

    :param root: the root ModelGroup instance of the model.
    :ivar occurs: the Counter instance for keeping track of occurrences of XSD elements and groups.
    :ivar element: the current XSD element, initialized to the first element of the model.
    :ivar broken: a boolean value that records if the model is still usable.
    :ivar group: the current XSD model group, initialized to *root* argument.
    :ivar iterator: the current XSD group iterator.
    :ivar items: the current XSD group unmatched items.
    :ivar match: if the XSD group has an effective item match.
    """
    def __init__(self, root):
        self.root = root
        self.occurs = Counter()
        # Stack of (group, iterator, items, match) frames for the ancestor
        # groups, exposed through the MutableSequence interface below.
        self._subgroups = []
        self.element = None
        self.broken = False
        # 'items' holds the group's children reversed, so that pop()
        # removes the first still-unmatched item of the group.
        self.group, self.iterator, self.items, self.match = root, iter(root), root[::-1], False
        self._start()

    def __str__(self):
        # Python 2 compatibility shim: delegate to the unicode repr.
        # noinspection PyCompatibility,PyUnresolvedReferences
        return unicode(self).encode("utf-8")

    def __unicode__(self):
        return self.__repr__()

    if PY3:
        __str__ = __unicode__

    def __repr__(self):
        return '%s(root=%r)' % (self.__class__.__name__, self.root)

    # Implements the abstract methods of MutableSequence
    def __getitem__(self, i):
        return self._subgroups[i]

    def __setitem__(self, i, item):
        self._subgroups[i] = item

    def __delitem__(self, i):
        del self._subgroups[i]

    def __len__(self):
        return len(self._subgroups)

    def insert(self, i, item):
        self._subgroups.insert(i, item)

    def clear(self):
        """Resets the visitor to its initial state, keeping the same root."""
        del self._subgroups[:]
        self.occurs.clear()
        self.element = None
        self.broken = False
        self.group, self.iterator, self.items, self.match = self.root, iter(self.root), self.root[::-1], False

    def _start(self):
        # Descend into the first non-empty subgroups so that 'element'
        # becomes the model's first element (or None for an empty model).
        while True:
            item = next(self.iterator, None)
            if item is None or not isinstance(item, ModelGroup):
                self.element = item
                break
            elif item:
                self.append((self.group, self.iterator, self.items, self.match))
                self.group, self.iterator, self.items, self.match = item, iter(item), item[::-1], False

    @property
    def expected(self):
        """
        Returns the expected elements of the current and descendant groups.
        """
        expected = []
        for item in reversed(self.items):
            if isinstance(item, ModelGroup):
                expected.extend(item.iter_elements())
            else:
                expected.append(item)
                # Elements of the substitution group are acceptable matches.
                expected.extend(item.maps.substitution_groups.get(item.name, ()))
        return expected

    def restart(self):
        """Restarts the visit from the beginning of the model."""
        self.clear()
        self._start()

    def stop(self):
        """Advances until the model ends, yielding any remaining errors."""
        while self.element is not None:
            for e in self.advance():
                yield e

    def advance(self, match=False):
        """
        Generator function for advance to the next element. Yields tuples with
        particles information when occurrence violation is found.

        :param match: provides current element match.
        """
        def stop_item(item):
            """
            Stops element or group matching, incrementing current group counter.

            :return: `True` if the item has violated the minimum occurrences for itself \
            or for the current group, `False` otherwise.
            """
            if isinstance(item, ModelGroup):
                # Leaving a subgroup: restore the parent's frame.
                self.group, self.iterator, self.items, self.match = self.pop()

            item_occurs = occurs[item]
            model = self.group.model
            if item_occurs:
                self.match = True
                if model == 'choice':
                    # One alternative matched: the whole choice occurred once,
                    # restart its iteration.
                    occurs[item] = 0
                    occurs[self.group] += 1
                    self.iterator, self.match = iter(self.group), False
                else:
                    if model == 'all':
                        self.items.remove(item)
                    else:
                        self.items.pop()
                    if not self.items:
                        self.occurs[self.group] += 1
                return item.is_missing(item_occurs)

            elif model == 'sequence':
                if self.match:
                    self.items.pop()
                    if not self.items:
                        occurs[self.group] += 1
                    return not item.is_emptiable()
                elif item.is_emptiable():
                    self.items.pop()
                    return False
                elif self.group.min_occurs <= occurs[self.group] or self:
                    return stop_item(self.group)
                else:
                    self.items.pop()
                    return True

        element, occurs = self.element, self.occurs
        if element is None:
            raise XMLSchemaValueError("cannot advance, %r is ended!" % self)

        if match:
            occurs[element] += 1
            self.match = True
            if not element.is_over(occurs[element]):
                # The element may still repeat: stay on it.
                return
        try:
            if stop_item(element):
                yield element, occurs[element], [element]

            while True:
                while self.group.is_over(occurs[self.group]):
                    stop_item(self.group)

                obj = next(self.iterator, None)
                if obj is None:
                    # The current group's iterator is exhausted.
                    if not self.match:
                        if self.group.model == 'all' and all(e.min_occurs == 0 for e in self.items):
                            occurs[self.group] += 1
                        group, expected = self.group, self.items
                        if stop_item(group) and expected:
                            yield group, occurs[group], self.expected
                    elif not self.items:
                        self.iterator, self.items, self.match = iter(self.group), self.group[::-1], False
                    elif self.group.model == 'all':
                        self.iterator, self.match = iter(self.items), False
                    elif all(e.min_occurs == 0 for e in self.items):
                        self.iterator, self.items, self.match = iter(self.group), self.group[::-1], False
                        occurs[self.group] += 1
                elif not isinstance(obj, ModelGroup):  # XsdElement or XsdAnyElement
                    self.element, occurs[obj] = obj, 0
                    return
                elif obj:
                    # Entering a non-empty subgroup: push the current frame.
                    self.append((self.group, self.iterator, self.items, self.match))
                    self.group, self.iterator, self.items, self.match = obj, iter(obj), obj[::-1], False
                    occurs[obj] = 0

        except IndexError:
            # pop() on the empty frame stack: the root group is exhausted.
            self.element = None
            if self.group.is_missing(occurs[self.group]) and self.items:
                yield self.group, occurs[self.group], self.expected
| brunato/xmlschema | xmlschema/validators/models.py | Python | mit | 18,670 | [
"VisIt"
] | ff426da33f5cf45cfec8b6d7c4fdbc483b58e8f0c86bc93714137fe7fb11af59 |
# Copyright (C) 2012-2013 Wesley Baugh
#
# This work is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit:
# http://creativecommons.org/licenses/by-nc-sa/3.0/
#
# This file was created using the streaming example from Tweepy
# as a general guide. Tweepy is licensed under the MIT License
# and is Copyright (c) 2009-2010 Joshua Roesslein.
"""Collects all tweets from the sample Public stream using Twitter's
streaming API, and saves them to a file for later use as a corpus.
The sample Public stream "Returns a small random sample of all public
statuses. The Tweets returned by the default access level are the same,
so if two different clients connect to this endpoint, they will see the
same Tweets."
This module consumes tweets from the sample Public stream and putes them
on a queue. The tweets are then consumed from the queue by writing them
to a file in JSON format as sent by twitter, with one tweet per line.
This file can then be processed and filtered as necessary to create a
corpus of tweets for use with Machine Learning, Natural Language Processing,
and other Human-Centered Computing applications.
"""
from __future__ import print_function
import sys
import threading
import Queue
import time
import socket
import httplib
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
from tweepy.utils import import_simplejson
json = import_simplejson()
# Configuration file that contains the Twitter API credentials.
CONFIG_FILE = 'config.yaml'
# Number of seconds to wait after an exception before restarting the stream.
tcpip_delay = 0.25
MAX_TCPIP_TIMEOUT = 16
http_delay = 5
MAX_HTTP_TIMEOUT = 320
class QueueListener(StreamListener):
    """Stream listener that buffers incoming tweets on an internal queue.

    Consumers take tweets directly off of ``self.queue`` in order to
    process them. In this module they are written to a file, but they
    could just as easily be processed on the fly.
    """

    def __init__(self):
        """Creates a new stream listener with an internal queue for tweets."""
        super(QueueListener, self).__init__()
        self.num_handled = 0
        self.queue = Queue.Queue()

    def on_data(self, data):
        """Routes the raw stream data to the appropriate method."""
        message = json.loads(data)
        if 'in_reply_to_status_id' in message:
            # A regular status: keep streaming unless the handler vetoes it.
            return self.on_status(data) is not False
        if 'limit' in message:
            # A limit notice from Twitter.
            return self.on_limit(message['limit']['track']) is not False
        return True

    def on_status(self, data):
        """Puts each tweet in JSON format in the queue and increments count."""
        # Note that 'data' is the raw JSON string from the stream, not a
        # parsed Status object as in the default listener.
        self.queue.put(data)
        self.num_handled += 1

    def on_error(self, status):
        """Prints any error to the console but does not halt the stream."""
        print('ON ERROR:', status, file=sys.stderr)

    def on_limit(self, track):
        """Prints any limit notice to the console but doesn't halt.

        The value of 'track' is how many tweets in total matched the filter
        but were not sent since the stream was opened.
        """
        print('ON LIMIT:', track, file=sys.stderr)
def print_status(listener, seconds=5.0, last_count=0):
    """Call once to repeatedly display statistics every N seconds."""
    handled = listener.num_handled
    pending = listener.queue.qsize()
    # Re-arm a daemon timer so the stats keep printing without preventing
    # interpreter exit.
    timer = threading.Timer(seconds, print_status,
                            args=[listener, seconds, handled])
    timer.daemon = True
    timer.start()
    delta = handled - last_count
    print('\nTOTAL TWEETS HANDLED:', handled, file=sys.stderr)
    print(
        'NUM IN THE PAST {seconds} seconds: {delta} ({rate} per sec)'.format(
            seconds=seconds,
            delta=delta,
            rate=delta / seconds,
        ),
        file=sys.stderr,
    )
    if pending > 0:
        print('QUEUE SIZE:', pending, file=sys.stderr)
def worker(listener, flush_every=500):
    """Takes tweets off of the queue and writes them to a file (stdout)."""
    pending_flush = 0
    # iter(callable, sentinel) stops as soon as the poison pill (None)
    # arrives on the queue.
    for data in iter(listener.queue.get, None):
        try:
            print(data)
        except UnicodeDecodeError:
            print(
                'ERROR: UnicodeDecodeError ... continuing.',
                file=sys.stderr,
            )
        pending_flush += 1
        if pending_flush == flush_every:
            sys.stdout.flush()
            pending_flush = 0
        listener.queue.task_done()
    # Account for the sentinel item itself.
    listener.queue.task_done()
def main():
    """Connects to the stream and starts a thread to write tweets to a file.

    Blocks until interrupted (Ctrl-C) or a fatal error occurs, then shuts
    the writer thread down cleanly. Returns None.
    """
    listener = QueueListener()

    # SECURITY: these API credentials are hard-coded (and therefore
    # published with this file). They should be revoked and loaded from
    # CONFIG_FILE or environment variables instead.
    ckey = "i9m8JABm5zmB1scZDUg94SLf4"
    csecret = "Og7AXk99eXMF4v6sc0FTj3fEvfdDhkATzr3HZnNJksXCBPClpF"
    atoken = "258676850-2TSMkNpJq3mkp6SXXIDoHZvTtthnPkOgR3utaxME"
    asecret = "vz22ULf6FKowaDq2GIh0tL8K47t0nAXTeGRTjJg7txgig"
    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)

    writer_thread = threading.Thread(target=worker, args=(listener,))
    writer_thread.start()

    # Keywords tracked by the filtered stream.
    track_terms = [
        "Desk Information Security", "Access Information Security",
        "Fire Extinguisher Information Security", "Emergency Information Security",
        "Lightning resister Information Security", "Lock Information Security",
        "Power Information Security", "Location Information Security",
        "Surveillance Information Security", "Monitor Information Security",
        "Heating Ventillation Airconditioning Information Security",
        "Alarm Information Security", "Floor Information Security",
        "Ceiling Information Security", "Rack Information Security",
        "Server Security", "Storage Security", "Alert Information Security",
        "Monitor Information Security", "Asset Information Security",
        "Incident Information Security", "Policy Information Security",
        "People Information Security", "Standard Information Security",
        "Procedure Information Security", "Governance Information Security",
        "Contract Information Security", "Law Information Security",
        "Intellectual Property Rights Information Security",
        "Metrics Information Security", "Testing Information Security",
        "Certificate Information Security", "Compliance Information Security",
        "Regulation Information Security", "Business Continuity Information Security",
        "Firewall ", "Network Time Protocol Security", "Virtual Private Network ",
        "VPN", "Open Systems Interconnect Security", "Topology Security",
        "Throughput Security", "Bandwidth Security", "Local Area Network Security",
        "LAN Security", "Wide Area Network Security", "WAN Security",
        "Virtual Local Area Network Security", "Demilitarized zone Network Security",
        "Domain Name System Security", "Internet Protocol V4 Security",
        "Internet Protocol V6 Security", "IP Security", "IPV4 Security",
        "IPV6 Security", "Wireless Security", "Internet Security",
        "Switch Network Security", "Router Network Security",
        "Multiplexer Network Security", "Operating System Security",
        "Data Security", "Web Security", "Code Application Security",
        "Web Application Firewall", "Middle Tier Security", "Account Security",
        "Authorization Security", "Authentication Security", "Cryptography",
        "Computer Information Security", "Desktop Information Security",
        "Laptop Information Security", "Thin Client Information Security",
        "Mobile Device Security", "Projector Information Security",
        "Printer Information Security", "Keyboard Information Security",
        "Mouse Information Security", "USB Information Security", "Anti-virus",
        "IaaS Security", "PaaS Security", "SaaS Security",
        "Virtualization Security", "Virtual Private Cloud", "VPC Security ",
        "Crime Cyber", "Cyber Squatter", "Cyber Security",
        "Social Engineering Cyber", "Safety Cyber", "deceptive software",
        "Injection Information", "Tampering Information", "Repudiation Security",
        "Information disclosure", "hacking", "hactivism", "adware", "spyware",
        "trojan Security", "zombie Security", "denial of service", "DOS attack",
        "Distributed Denial of Service", "DDOS attack", "Cross site scripting",
        "XSS", "Cross Site Request Forgery", "CSRF", "Buffer overflow",
        "sniffer Information Security", "spam", "spoofing", "Groupware",
        "Phishing", "Smishing", "Vishing", "ransomware", "malware", "botnet",
    ]

    # BUG FIX: this used to be `stream = Stream(auth, listener).filter(...)`.
    # Stream.filter() blocks until the connection drops and returns None, so
    # the subsequent stream.sample() and stream.disconnect() calls raised
    # AttributeError on None and the reconnect loop never worked. Keep the
    # Stream object and run the blocking filter call inside the retry loop.
    stream = Stream(auth, listener)
    print_status(listener)
    try:
        while True:
            try:
                stream.filter(track=track_terms)  # blocking!
            except KeyboardInterrupt:
                print('KEYBOARD INTERRUPT', file=sys.stderr)
                return
            except (socket.error, httplib.HTTPException):
                # Back off a little longer after each TCP/IP failure.
                global tcpip_delay
                print(
                    'TCP/IP Error: Restarting after {delay} seconds.'.format(
                        delay=tcpip_delay,
                    ),
                    file=sys.stderr,
                )
                time.sleep(min(tcpip_delay, MAX_TCPIP_TIMEOUT))
                tcpip_delay += 0.25
    finally:
        print('Disconnecting stream', file=sys.stderr)
        stream.disconnect()
        print('Waiting for last tweets to finish processing', file=sys.stderr)
        # Send poison pill to writer thread and wait for it to exit
        listener.queue.put(None)
        listener.queue.join()
        print('Waiting for writer thread to finish', file=sys.stderr)
        writer_thread.join()
        print('Exit successful', file=sys.stderr)
if __name__ == '__main__':
sys.exit(main())
| souravsarangi/SeedURLWork | TwitterURLs/twitter_corpus.py | Python | mit | 10,180 | [
"VisIt"
] | 63001ecb36f9c354d18c4357db65280bbcf9b80e580bf214f3fce489d5df9dee |
# Orca
#
# Copyright (C) 2010-2011 The Orca Team
# Copyright (C) 2011-2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (C) 2010-2011 The Orca Team" \
"Copyright (C) 2011-2012 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import pyatspi.utils as utils
import orca.scripts.default as default
import orca.cmdnames as cmdnames
import orca.debug as debug
import orca.guilabels as guilabels
import orca.input_event as input_event
import orca.messages as messages
import orca.orca as orca
import orca.settings as settings
import orca.settings_manager as settings_manager
import orca.speechserver as speechserver
import orca.orca_state as orca_state
import orca.speech as speech
import orca.structural_navigation as structural_navigation
from .braille_generator import BrailleGenerator
from .speech_generator import SpeechGenerator
from .script_utilities import Utilities
_settingsManager = settings_manager.getManager()
########################################################################
# #
# The WebKitGtk script class. #
# #
########################################################################
class Script(default.Script):
def __init__(self, app):
    """Creates a new script for WebKitGtk applications.

    Arguments:
    - app: the application to create a script for.
    """
    super().__init__(app)
    self._loadingDocumentContent = False
    self._lastCaretContext = None, -1
    self.sayAllOnLoadCheckButton = None
    # Idiom fix: use an identity test for None — '== None' can be fooled
    # by objects that override __eq__.
    if _settingsManager.getSetting('sayAllOnLoad') is None:
        _settingsManager.setSetting('sayAllOnLoad', True)
def setupInputEventHandlers(self):
    """Defines InputEventHandler fields for this script that can be
    called by the key and braille bindings."""

    default.Script.setupInputEventHandlers(self)

    # Merge in the handlers contributed by structural navigation.
    self.inputEventHandlers.update(
        self.structuralNavigation.inputEventHandlers)

    handlers = self.inputEventHandlers
    handlers["sayAllHandler"] = input_event.InputEventHandler(
        Script.sayAll, cmdnames.SAY_ALL)
    # The braille panning handlers are excluded from learn mode
    # (third argument False).
    handlers["panBrailleLeftHandler"] = input_event.InputEventHandler(
        Script.panBrailleLeft, cmdnames.PAN_BRAILLE_LEFT, False)
    handlers["panBrailleRightHandler"] = input_event.InputEventHandler(
        Script.panBrailleRight, cmdnames.PAN_BRAILLE_RIGHT, False)
def getToolkitKeyBindings(self):
    """Returns the toolkit-specific keybindings for this script."""

    # All WebKitGtk-specific bindings come from structural navigation.
    bindings = self.structuralNavigation.keyBindings
    return bindings
def getAppPreferencesGUI(self):
    """Return a GtkGrid containing the application unique configuration
    GUI items for the current application."""

    from gi.repository import Gtk

    grid = Gtk.Grid()
    grid.set_border_width(12)

    checkButton = Gtk.CheckButton.new_with_mnemonic(
        guilabels.READ_PAGE_UPON_LOAD)
    checkButton.set_active(_settingsManager.getSetting('sayAllOnLoad'))
    self.sayAllOnLoadCheckButton = checkButton
    grid.attach(checkButton, 0, 0, 1, 1)

    grid.show_all()
    return grid
def getPreferencesFromGUI(self):
    """Returns a dictionary with the app-specific preferences."""

    sayAllOnLoad = self.sayAllOnLoadCheckButton.get_active()
    return {'sayAllOnLoad': sayAllOnLoad}
def getBrailleGenerator(self):
    """Returns the braille generator for this script."""

    generator = BrailleGenerator(self)
    return generator
def getSpeechGenerator(self):
    """Returns the speech generator for this script."""

    generator = SpeechGenerator(self)
    return generator
def getEnabledStructuralNavigationTypes(self):
    """Returns a list of the structural navigation object types
    enabled in this script."""
    # Every supported navigation type is enabled; resolve the constants
    # by name off of the StructuralNavigation class.
    typeNames = ['BLOCKQUOTE', 'BUTTON', 'CHECK_BOX', 'CHUNK',
                 'CLICKABLE', 'COMBO_BOX', 'ENTRY', 'FORM_FIELD',
                 'HEADING', 'IMAGE', 'LANDMARK', 'LINK', 'LIST',
                 'LIST_ITEM', 'LIVE_REGION', 'PARAGRAPH',
                 'RADIO_BUTTON', 'SEPARATOR', 'TABLE', 'TABLE_CELL',
                 'UNVISITED_LINK', 'VISITED_LINK']
    return [getattr(structural_navigation.StructuralNavigation, name)
            for name in typeNames]
def getUtilities(self):
    """Returns the utilites for this script."""
    utilities = Utilities(self)
    return utilities
def onCaretMoved(self, event):
    """Callback for object:text-caret-moved accessibility events."""
    # Ignore caret churn generated while Say All is presenting content.
    if self._inSayAll:
        return

    # Non-WebKitGtk sources get the default handling.
    if not self.utilities.isWebKitGtk(event.source):
        super().onCaretMoved(event)
        return

    lastKey, mods = self.utilities.lastKeyAndModifiers()
    # Tab navigation is presented via focus events; skip caret handling.
    if lastKey in ['Tab', 'ISO_Left_Tab']:
        return

    # Special case: Down-arrowing onto the first child of a focused link.
    # Only refresh braille here; the full presentation is suppressed.
    if lastKey == 'Down' \
       and orca_state.locusOfFocus == event.source.parent \
       and event.source.getIndexInParent() == 0 \
       and orca_state.locusOfFocus.getRole() == pyatspi.ROLE_LINK:
        self.updateBraille(event.source)
        return

    # Remember the new caret position so other handlers can use it.
    self.utilities.setCaretContext(event.source, event.detail1)
    super().onCaretMoved(event)
def onDocumentReload(self, event):
    """Callback for document:reload accessibility events."""
    # A reload in browser-like content means we are loading again.
    if not self.utilities.treatAsBrowser(event.source):
        return
    self._loadingDocumentContent = True
def onDocumentLoadComplete(self, event):
    """Callback for document:load-complete accessibility events."""
    if not self.utilities.treatAsBrowser(event.source):
        return

    self._loadingDocumentContent = False

    # TODO: We need to see what happens in Epiphany on pages where focus
    # is grabbed rather than set the caret at the start. But for simple
    # content in both Yelp and Epiphany this is alright for now.
    obj, offset = self.utilities.setCaretAtStart(event.source)
    self.utilities.setCaretContext(obj, offset)

    self.updateBraille(obj)
    # Optionally read the whole page as soon as it finishes loading.
    if _settingsManager.getSetting('sayAllOnLoad') \
       and _settingsManager.getSetting('enableSpeech'):
        self.sayAll(None)
def onDocumentLoadStopped(self, event):
    """Callback for document:load-stopped accessibility events."""
    # A stopped load means we are no longer loading content.
    if not self.utilities.treatAsBrowser(event.source):
        return
    self._loadingDocumentContent = False
def onFocusedChanged(self, event):
    """Callback for object:state-changed:focused accessibility events."""
    # Only react to focus gains, and never while Say All is running.
    if self._inSayAll or not event.detail1:
        return

    if not self.utilities.isWebKitGtk(event.source):
        super().onFocusedChanged(event)
        return

    # If focus landed on the object we already track as the caret
    # context, there is nothing new to present.
    contextObj, offset = self.utilities.getCaretContext()
    if event.source == contextObj:
        return

    obj = event.source
    role = obj.getRole()

    # Text-container roles are ignored here — presumably to avoid
    # redundant announcements during caret navigation (TODO confirm).
    textRoles = [pyatspi.ROLE_HEADING,
                 pyatspi.ROLE_PANEL,
                 pyatspi.ROLE_PARAGRAPH,
                 pyatspi.ROLE_SECTION,
                 pyatspi.ROLE_TABLE_CELL]
    if role in textRoles \
       or (role == pyatspi.ROLE_LIST_ITEM and obj.childCount):
        return

    super().onFocusedChanged(event)
def onBusyChanged(self, event):
    """Callback for object:state-changed:busy accessibility events."""
    obj = event.source
    try:
        # `role` is unused below, but the getRole()/name calls double as
        # a liveness probe: they raise for defunct accessibles.
        # NOTE(review): confirm before removing the getRole() call.
        role = obj.getRole()
        name = obj.name
    except:
        return

    if not self.utilities.treatAsBrowser(obj):
        return

    # detail1 is non-zero when the page starts loading, zero when done.
    if event.detail1:
        self.presentMessage(messages.PAGE_LOADING_START)
    elif name:
        self.presentMessage(messages.PAGE_LOADING_END_NAMED % name)
    else:
        self.presentMessage(messages.PAGE_LOADING_END)
def sayCharacter(self, obj):
    """Speak the character at the caret.

    Arguments:
    - obj: an Accessible object that implements the AccessibleText interface
    """
    # Entries behave like ordinary text fields; use the default script.
    if obj.getRole() == pyatspi.ROLE_ENTRY:
        default.Script.sayCharacter(self, obj)
        return

    boundary = pyatspi.TEXT_BOUNDARY_CHAR
    objects = self.utilities.getObjectsFromEOCs(obj, boundary=boundary)
    for (obj, start, end, string) in objects:
        if string:
            self.speakCharacter(string)
        else:
            # No character text (e.g. an embedded object): present the
            # object itself instead.
            speech.speak(self.speechGenerator.generateSpeech(obj))
def sayWord(self, obj):
    """Speaks the word at the caret.

    Arguments:
    - obj: an Accessible object that implements the AccessibleText interface
    """
    # Entries behave like ordinary text fields; use the default script.
    if obj.getRole() == pyatspi.ROLE_ENTRY:
        default.Script.sayWord(self, obj)
        return

    # Expand embedded-object characters into their objects, then speak
    # each resulting chunk as a phrase.
    chunks = self.utilities.getObjectsFromEOCs(
        obj, boundary=pyatspi.TEXT_BOUNDARY_WORD_START)
    for (chunkObj, start, end, string) in chunks:
        self.sayPhrase(chunkObj, start, end)
def sayLine(self, obj):
    """Speaks the line at the caret.

    Arguments:
    - obj: an Accessible object that implements the AccessibleText interface
    """
    # Entries behave like ordinary text fields; use the default script.
    if obj.getRole() == pyatspi.ROLE_ENTRY:
        default.Script.sayLine(self, obj)
        return

    boundary = pyatspi.TEXT_BOUNDARY_LINE_START
    objects = self.utilities.getObjectsFromEOCs(obj, boundary=boundary)
    for (obj, start, end, string) in objects:
        self.sayPhrase(obj, start, end)

        # TODO: Move these next items into the speech generator.
        # A panel's first child stands in for the panel itself when
        # deciding whether to announce the role.
        if obj.getRole() == pyatspi.ROLE_PANEL \
           and obj.getIndexInParent() == 0:
            obj = obj.parent

        # Announce the role for headings and links.
        rolesToSpeak = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK]
        if obj.getRole() in rolesToSpeak:
            speech.speak(self.speechGenerator.getRoleName(obj))
def sayPhrase(self, obj, startOffset, endOffset):
    """Speaks the text of an Accessible object between the given offsets.

    Arguments:
    - obj: an Accessible object that implements the AccessibleText interface
    - startOffset: the start text offset.
    - endOffset: the end text offset.
    """
    # Entries behave like ordinary text fields; use the default script.
    if obj.getRole() == pyatspi.ROLE_ENTRY:
        default.Script.sayPhrase(self, obj, startOffset, endOffset)
        return

    phrase = self.utilities.substring(obj, startOffset, endOffset)
    if len(phrase) and phrase != "\n":
        # All-caps text is voiced differently so it stands out.
        if phrase.isupper():
            voice = self.voices[settings.UPPERCASE_VOICE]
        else:
            voice = self.voices[settings.DEFAULT_VOICE]

        phrase = self.utilities.adjustForRepeats(phrase)
        # If the phrase contains links, adjust so they are presented.
        links = [x for x in obj if x.getRole() == pyatspi.ROLE_LINK]
        if links:
            phrase = self.utilities.adjustForLinks(obj, phrase, startOffset)
        speech.speak(phrase, voice)
    else:
        # Speak blank line if appropriate.
        #
        self.sayCharacter(obj)
def skipObjectEvent(self, event):
    """Gives us, and scripts, the ability to decide an event isn't
    worth taking the time to process under the current circumstances.

    Arguments:
    - event: the Event

    Returns True if we shouldn't bother processing this object event.
    """
    # Never skip focus gains on links; defer everything else to the
    # default script's policy.
    isFocusGain = event.type.startswith('object:state-changed:focused') \
        and event.detail1
    if isFocusGain and event.source.getRole() == pyatspi.ROLE_LINK:
        return False

    return default.Script.skipObjectEvent(self, event)
def useStructuralNavigationModel(self):
    """Returns True if we should do our own structural navigation.
    This should return False if we're in a form field, or not in
    document content.
    """
    # Roles whose keystrokes should go to the widget itself rather than
    # being consumed as structural-navigation commands.
    doNotHandleRoles = [pyatspi.ROLE_ENTRY,
                        pyatspi.ROLE_TEXT,
                        pyatspi.ROLE_PASSWORD_TEXT,
                        pyatspi.ROLE_LIST,
                        pyatspi.ROLE_LIST_ITEM,
                        pyatspi.ROLE_MENU_ITEM]

    if not self.structuralNavigation.enabled:
        return False

    if not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
        return False

    # Editable content always gets the keystrokes.
    states = orca_state.locusOfFocus.getState()
    if states.contains(pyatspi.STATE_EDITABLE):
        return False

    role = orca_state.locusOfFocus.getRole()
    if role in doNotHandleRoles:
        # Selectable list items act like widgets; non-selectable ones
        # are document content we can navigate.
        if role == pyatspi.ROLE_LIST_ITEM:
            return not states.contains(pyatspi.STATE_SELECTABLE)

        if states.contains(pyatspi.STATE_FOCUSED):
            return False

    return True
def panBrailleLeft(self, inputEvent=None, panAmount=0):
    """In document content, we want to use the panning keys to browse the
    entire document.
    """
    # Use the default panning unless we are at the beginning of the
    # display while in WebKitGtk document content.
    if self.flatReviewContext \
       or not self.isBrailleBeginningShowing() \
       or not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
        return default.Script.panBrailleLeft(self, inputEvent, panAmount)

    # Move to the previous object without notifying the script.
    obj = self.utilities.findPreviousObject(orca_state.locusOfFocus)
    orca.setLocusOfFocus(None, obj, notifyScript=False)
    self.updateBraille(obj)

    # Hack: When panning to the left in a document, we want to start at
    # the right/bottom of each new object. For now, we'll pan there.
    # When time permits, we'll give our braille code some smarts.
    while self.panBrailleInDirection(panToLeft=False):
        pass

    self.refreshBraille(False)
    return True
def panBrailleRight(self, inputEvent=None, panAmount=0):
    """In document content, we want to use the panning keys to browse the
    entire document.
    """
    # Use the default panning unless we are at the end of the display
    # while in WebKitGtk document content.
    if self.flatReviewContext \
       or not self.isBrailleEndShowing() \
       or not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
        return default.Script.panBrailleRight(self, inputEvent, panAmount)

    # Move to the next object without notifying the script.
    obj = self.utilities.findNextObject(orca_state.locusOfFocus)
    orca.setLocusOfFocus(None, obj, notifyScript=False)
    self.updateBraille(obj)

    # Hack: When panning to the right in a document, we want to start at
    # the left/top of each new object. For now, we'll pan there. When time
    # permits, we'll give our braille code some smarts.
    while self.panBrailleInDirection(panToLeft=True):
        pass

    self.refreshBraille(False)
    return True
def sayAll(self, inputEvent, obj=None, offset=None):
    """Speaks the contents of the document beginning with the present
    location. Overridden in this script because the sayAll could have
    been started on an object without text (such as an image).
    """
    # Fall back to the caret location when no object was given.
    target = obj or orca_state.locusOfFocus
    if not self.utilities.isWebKitGtk(target):
        return default.Script.sayAll(self, inputEvent, target, offset)

    speech.sayAll(self.textLines(target, offset),
                  self.__sayAllProgressCallback)
    return True
def getTextSegments(self, obj, boundary, offset=0):
    """Return a list of [string, start, end, voice] segments covering
    obj's text from offset onwards, split at the given text boundary."""
    segments = []
    text = obj.queryText()
    length = text.characterCount
    string, start, end = text.getTextAtOffset(offset, boundary)
    while string and offset < length:
        string = self.utilities.adjustForRepeats(string)
        voice = self.speechGenerator.getVoiceForString(obj, string)
        string = self.utilities.adjustForLinks(obj, string, start)
        # Incrementing the offset should cause us to eventually reach
        # the end of the text as indicated by a 0-length string and
        # start and end offsets of 0. Sometimes WebKitGtk returns the
        # final text segment instead.
        if segments and [string, start, end, voice] == segments[-1]:
            break
        segments.append([string, start, end, voice])
        offset = end + 1
        string, start, end = text.getTextAtOffset(offset, boundary)

    return segments
def textLines(self, obj, offset=None):
    """Creates a generator that can be used to iterate over each line
    of a text object, starting at the caret offset.

    Arguments:
    - obj: an Accessible that has a text specialization

    Returns an iterator that produces elements of the form:
    [SayAllContext, acss], where SayAllContext has the text to be
    spoken and acss is an ACSS instance for speaking the text.
    """
    self._sayAllIsInterrupted = False
    self._inSayAll = False
    if not obj:
        return

    # Say All started on a link begins from its containing text block.
    if obj.getRole() == pyatspi.ROLE_LINK:
        obj = obj.parent

    # Find the enclosing document; bail if it is still loading (busy).
    docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
    document = utils.findAncestor(obj, lambda x: x.getRole() in docRoles)
    if not document or document.getState().contains(pyatspi.STATE_BUSY):
        return

    # Collect every Text-implementing descendant from obj onwards, then
    # keep only top-most ones (drop objects whose parent is also kept).
    allTextObjs = utils.findAllDescendants(
        document, lambda x: x and 'Text' in utils.listInterfaces(x))
    allTextObjs = allTextObjs[allTextObjs.index(obj):len(allTextObjs)]
    textObjs = [x for x in allTextObjs if x.parent not in allTextObjs]
    if not textObjs:
        return

    boundary = pyatspi.TEXT_BOUNDARY_LINE_START
    sayAllStyle = _settingsManager.getSetting('sayAllStyle')
    if sayAllStyle == settings.SAYALL_STYLE_SENTENCE:
        boundary = pyatspi.TEXT_BOUNDARY_SENTENCE_START

    self._inSayAll = True
    # Only the first object starts at the caret; later ones start at 0.
    offset = textObjs[0].queryText().caretOffset
    for textObj in textObjs:
        textSegments = self.getTextSegments(textObj, boundary, offset)
        # Append the role announcement (e.g. "link") after the text.
        roleInfo = self.speechGenerator.getRoleName(textObj)
        if roleInfo:
            roleName, voice = roleInfo
            textSegments.append([roleName, 0, -1, voice])

        for (string, start, end, voice) in textSegments:
            context = speechserver.SayAllContext(textObj, string, start, end)
            self._sayAllContexts.append(context)
            yield [context, voice]

        offset = 0

    self._inSayAll = False
    self._sayAllContexts = []
def __sayAllProgressCallback(self, context, progressType):
    """Invoked by the speech server as Say All advances through the
    yielded SayAllContexts; keeps focus and caret in sync."""
    if progressType == speechserver.SayAllContext.PROGRESS:
        return

    obj = context.obj
    orca.setLocusOfFocus(None, obj, notifyScript=False)

    offset = context.currentOffset
    text = obj.queryText()

    if progressType == speechserver.SayAllContext.INTERRUPTED:
        self._sayAllIsInterrupted = True
        # Down/Up during Say All fast-forwards/rewinds rather than
        # simply stopping.
        if isinstance(orca_state.lastInputEvent, input_event.KeyboardEvent):
            lastKey = orca_state.lastInputEvent.event_string
            if lastKey == "Down" and self._fastForwardSayAll(context):
                return
            elif lastKey == "Up" and self._rewindSayAll(context):
                return

        self._inSayAll = False
        self._sayAllContexts = []
        if not self._lastCommandWasStructNav:
            text.setCaretOffset(offset)
        return

    # SayAllContext.COMPLETED doesn't necessarily mean done with SayAll;
    # just done with the current object. If we're still in SayAll, we do
    # not want to set the caret (and hence set focus) in a link we just
    # passed by.
    try:
        hypertext = obj.queryHypertext()
    except NotImplementedError:
        pass
    else:
        linkCount = hypertext.getNLinks()
        links = [hypertext.getLink(x) for x in range(linkCount)]
        if [l for l in links if l.startIndex <= offset <= l.endIndex]:
            return

    text.setCaretOffset(offset)
def getTextLineAtCaret(self, obj, offset=None, startOffset=None, endOffset=None):
    """To-be-removed. Returns the string, caretOffset, startOffset."""
    textLine = super().getTextLineAtCaret(obj, offset, startOffset, endOffset)
    string = textLine[0]
    # Plain text (no embedded-object characters) in a focused object:
    # the default result is usable as-is.
    if string and string.find(self.EMBEDDED_OBJECT_CHARACTER) == -1 \
       and obj.getState().contains(pyatspi.STATE_FOCUSED):
        return textLine

    # Otherwise fall back to the displayed text, clamping the caret
    # offset to the actual character count.
    textLine[0] = self.utilities.displayedText(obj)
    try:
        text = obj.queryText()
    except:
        pass
    else:
        textLine[1] = min(textLine[1], text.characterCount)

    return textLine
def updateBraille(self, obj, extraRegion=None):
    """Updates the braille display to show the given object.

    Arguments:
    - obj: the Accessible
    - extra: extra Region to add to the end
    """
    if not _settingsManager.getSetting('enableBraille') \
       and not _settingsManager.getSetting('enableBrailleMonitor'):
        debug.println(debug.LEVEL_INFO, "BRAILLE: update disabled")
        return

    if not obj:
        return

    # Only inline containers and text list items in WebKitGtk content
    # get the custom one-line treatment below.
    if not self.utilities.isWebKitGtk(obj) \
       or (not self.utilities.isInlineContainer(obj) \
           and not self.utilities.isTextListItem(obj)):
        default.Script.updateBraille(self, obj, extraRegion)
        return

    brailleLine = self.getNewBrailleLine(clearBraille=True, addLine=True)
    # Show every child that shares a line with the first child.
    for child in obj:
        if not self.utilities.onSameLine(child, obj[0]):
            break
        [regions, fRegion] = self.brailleGenerator.generateBraille(child)
        self.addBrailleRegionsToLine(regions, brailleLine)

    if not brailleLine.regions:
        # Nothing inline to show: present the container as a paragraph.
        [regions, fRegion] = self.brailleGenerator.generateBraille(
            obj, role=pyatspi.ROLE_PARAGRAPH)
        self.addBrailleRegionsToLine(regions, brailleLine)
        self.setBrailleFocus(fRegion)

    if extraRegion:
        self.addBrailleRegionToLine(extraRegion, brailleLine)

    self.refreshBraille()
| pvagner/orca | src/orca/scripts/toolkits/WebKitGtk/script.py | Python | lgpl-2.1 | 24,333 | [
"ORCA"
] | c63531abe1a0d4f20864c9f47b1de7e8d69c1a9eaf6c507aa8899eca4396b5dc |
"""
Grab IRAC bands 3,4 and MIPS mosaics from the web
"""
import os
from time import time
from urllib import urlopen
from multiprocessing import Pool
from functools import partial
def get_url(url, fname, target_dir='galaxy/raw/'):
""" Save a file from the internet
Parameters
----------
url : The url to download
fname : The filename to save to
target_dir : The directory to save into. Default= galaxy/raw
"""
outfile = os.path.join(target_dir + fname)
if os.path.exists(outfile) and (os.path.getsize(outfile) > 0):
return
print '+%s' % (url + fname)
data = urlopen(url + fname)
if data.getcode() != 200:
print '%s returned status code %i' % (url + fname, data.getcode())
return
t0 = time()
with open(outfile, 'wb') as out:
out.write(data.read())
t1 = time()
print 'Downloaded in %i seconds' % (t1 - t0)
def get_glimpse(folder, lon, survey='I'):
    """Download GLIMPSE images I3 and I4 for a single longitude

    Parameters
    ----------
    folder : Sub-directory on IPAC website (e.g. 'GLON_10-30')
    lon : Longitude tile to grab
    survey : Optional survey ('I', 'II')
    """
    base_url = ('http://irsa.ipac.caltech.edu/data/SPITZER/GLIMPSE/'
                'images/%s/1.2_mosaics_v3.5/' % survey)
    # Fetch the IRAC band-3 and band-4 mosaics for this tile.
    for band in ('I3', 'I4'):
        fname = 'GLM_%3.3i00+0000_mosaic_%s.fits' % (lon, band)
        get_url(base_url + folder, fname)
def get_mips(lon):
    """ Download MIPS images at b = +/- .5deg

    Parameters
    ----------
    lon : Longitude to grab
    """
    base_url = 'http://irsa.ipac.caltech.edu/data/SPITZER/MIPSGAL/images/mosaics24/'
    # 'p' = positive latitude tile, 'n' = negative latitude tile.
    for hemi in ('p', 'n'):
        get_url(base_url, 'MG%3.3i0%s005_024.fits' % (lon, hemi))
def get_all(threads=5):
    """ Grab all the data

    Parameters
    ----------
    Threads : how many worker threads to use
    """
    p = Pool(threads)

    # GLIMPSE I mosaics, grouped by the longitude folder they live in.
    lon = range(12, 30, 3)
    folder = 'GLON_10-30/'
    p.map(partial(get_glimpse, folder), lon)

    lon = range(30, 52, 3)
    folder = 'GLON_30-53/'
    p.map(partial(get_glimpse, folder), lon)

    # NOTE(review): this range starts at 33 but the folder covers l=53-66;
    # presumably it should be range(53, 67, 3) — confirm before changing.
    lon = range(33, 67, 3)
    folder = 'GLON_53-66/'
    p.map(partial(get_glimpse, folder), lon)

    lon = range(294, 310, 3)
    folder = 'GLON_284_295-310/'
    p.map(partial(get_glimpse, folder), lon)

    lon = range(312, 328, 3)
    folder = 'GLON_310-330/'
    p.map(partial(get_glimpse, folder), lon)

    lon = range(330, 349, 3)
    folder = 'GLON_330-350/'
    p.map(partial(get_glimpse, folder), lon)

    # GLIMPSE II tiles live at the survey root (no longitude subfolder).
    lon = range(0, 10, 3)
    folder = ''
    p.map(partial(get_glimpse, folder, survey='II'), lon)

    lon = range(351, 358, 3)
    folder = ''
    p.map(partial(get_glimpse, folder, survey='II'), lon)

    #XXX GLIMPSE 3D

    # MIPSGAL 24um tiles for both longitude ranges of the survey.
    lon = range(70)
    p.map(get_mips, lon)

    lon = range(292, 361)
    p.map(get_mips, lon)
if __name__ == "__main__":
    # Script entry point: download every tile with the default pool size.
    get_all()
| ChrisBeaumont/brut | bubbly/data/scrape.py | Python | mit | 2,957 | [
"Galaxy"
] | bb1923caf45c9264bb5e12450d7ecf7af68da1ec81aee05cfea36c5f126f91d2 |
#!/usr/bin/env python3
import argparse
import atexit
import os
import os.path
import signal
from kalplib import knpm
from kalplib import kutils
from kalplib import kwatchdog
from jnscommons import jnsos
VERSION = '1.2.2'
# How long join() waits on a watchdog thread before re-checking (seconds).
THREAD_WAIT_TIMEOUT = 1.0  # in seconds
# Module-level registry of running watchdog threads, plus a flag noting
# that they have all been asked to terminate.
watchdogs = []  # pylint: disable=invalid-name
watchdogs_terminated = False  # pylint: disable=invalid-name
def main():
    """Entry point: parse options, install deps, start and babysit the
    gulp/karma subprocesses."""
    # Register cleanup before anything can start subprocesses so the
    # watchdogs are torn down on normal exit and on Ctrl-C alike.
    atexit.register(terminate_watchdogs)
    signal.signal(signal.SIGINT, lambda signum, frame: terminate_watchdogs())

    opts = parse_args()
    maybe_do_npm_installs(opts)
    start_processes(opts)
    wait_on_watchdogs()
# ################# #
# Options Functions #
# ################# #
def parse_args():
    """Parse command line arguments.

    :returns: an argparse namespace with ``dry_run``, ``no_gulp``,
        ``no_karma``, ``no_restart``, and ``roots`` (validated,
        converted to absolute paths)
    :raises FileNotFoundError, NotADirectoryError: for bad root dirs
    """
    desc = "A utility to help manage `gulp' and `karma' in development environments\nVersion: {}".format(VERSION)
    parser = argparse.ArgumentParser(description=desc)

    parser.add_argument('--dry-run', action='store_true', default=False, dest='dry_run',
                        help='Just output what actions will be performed without actually performing them')
    # Bug fix: the help strings for -G and -K were swapped (and a couple
    # of typos fixed); -G suppresses gulp, -K suppresses karma.
    parser.add_argument('-G', '--no-gulp', action='store_true', default=False, dest='no_gulp',
                        help="Do not start the `gulp watch' subprocess (default: %(default)s)")
    parser.add_argument('-K', '--no-karma', action='store_true', default=False, dest='no_karma',
                        help="Do not start the `karma' subprocess (default: %(default)s)")
    parser.add_argument('-R', '--no-restart', action='store_true', default=False, dest='no_restart',
                        help="Do not restart the subprocesses when they die prematurely (default: %(default)s)")
    parser.add_argument('-r', '--root', action='append', default=[], metavar='ROOT', dest='roots',
                        help='Specify a root directory (default: .)')

    opts = parser.parse_args()

    # Default to the current directory, then validate and normalize.
    opts.roots = [os.getcwd()] if not opts.roots else opts.roots
    validate_directories(opts.roots)
    opts.roots = [os.path.abspath(p) for p in opts.roots]

    return opts
def validate_directories(directories):
    """Raise if any entry is missing or is not a directory."""
    for path in directories:
        if not os.path.exists(path):
            msg = kutils.format_error('Root directory does not exist: {}'.format(path))
            raise FileNotFoundError(msg)
        if not os.path.isdir(path):
            msg = kutils.format_error('Root directory is not a directory: {}'.format(path))
            raise NotADirectoryError(msg)
# ##################### #
# NPM Install Functions #
# ##################### #
def maybe_do_npm_installs(opts):
    """Run `npm install` in every root directory that needs it."""
    for root_dir in opts.roots:
        maybe_do_npm_install(root_dir, opts)
def maybe_do_npm_install(root, opts):
    """Run `npm install` under *root* if any npm dependency is missing
    or out of date, reporting what triggered the install."""
    package_json_dir, package_json = knpm.get_package_json(root)
    uninstalled_deps = knpm.get_uninstalled_dependencies(package_json_dir, package_json)
    out_of_date_deps = knpm.get_out_of_date_dependencies(package_json_dir, package_json)
    has_uninstalled_deps = len(uninstalled_deps) > 0
    has_out_of_date_deps = len(out_of_date_deps) > 0

    if has_uninstalled_deps or has_out_of_date_deps:
        kutils.print_titled('npm install required: ', [kutils.BOLD, kutils.CYAN], root, [kutils.BOLD])

        if has_uninstalled_deps:
            kutils.print_titled(
                'uninstalled dependencies: ', [kutils.BOLD, kutils.MAGENTA], ', '.join(uninstalled_deps), [])

        if has_out_of_date_deps:
            kutils.print_titled(
                'out of date dependencies: ', [kutils.BOLD, kutils.MAGENTA], ', '.join(out_of_date_deps), [])

        # Honor --dry-run: report what would happen without doing it.
        if not opts.dry_run:
            knpm.npm_install(package_json_dir)
# ####################### #
# Process Mgmt. Functions #
# ####################### #
def start_processes(opts):
    """Start the watchdog subprocesses for every root directory."""
    for root_dir in opts.roots:
        start_processes_for_root(opts, root_dir)
def start_processes_for_root(opts, root):
    """Start the gulp and/or karma watchdogs for one root directory."""
    kutils.print_titled('starting subprocesses: ', [kutils.BOLD, kutils.CYAN], root, [kutils.BOLD])

    started = 0
    if not opts.no_gulp:
        start_gulp_process(opts, root)
        started += 1
    if not opts.no_karma:
        start_karma_process(opts, root)
        started += 1

    # Make it obvious when both subprocesses were suppressed.
    if not started:
        kutils.print_formatted('none', kutils.BOLD, kutils.RED)
def start_gulp_process(opts, cwd):
    """Launch `gulp watch` under a watchdog in *cwd*."""
    # Windows installs npm binaries as .cmd wrappers.
    executable = 'gulp.cmd' if jnsos.is_windows() else 'gulp'
    start_watchdog(opts=opts, cmd=[executable, 'watch'], cwd=cwd)
def start_karma_process(opts, cwd):
    """Launch `karma start` under a watchdog, running it from the
    directory containing karma.conf.js (searched upward from *cwd*)."""
    karma_conf = 'karma.conf.js'
    karma_conf_dir = kutils.find_file_up_hierarchy(cwd, karma_conf)

    if karma_conf_dir is None:
        raise FileNotFoundError(kutils.format_error(
            'Could not start karma because "{}" could not be found.'.format(karma_conf)))

    # Windows installs npm binaries as .cmd wrappers.
    cmd = ['karma.cmd'] if jnsos.is_windows() else ['karma']
    cmd.append('start')

    start_watchdog(opts=opts, cmd=cmd, cwd=karma_conf_dir)
def start_watchdog(opts, cmd, cwd):
    """Start a watchdog thread for *cmd* and record it in the module's
    registry so it can be joined and terminated later."""
    # Unless --no-restart was given, the watchdog relaunches the
    # subprocess whenever it dies.
    watchdog = kwatchdog.WatchdogThread(
        cmd=cmd, cwd=cwd, keep_alive=not opts.no_restart, dry_run=opts.dry_run, wait_timeout=THREAD_WAIT_TIMEOUT)
    watchdog.start()
    watchdogs.append(watchdog)
def wait_on_watchdogs():
    """Block until every watchdog thread has finished or termination
    has been requested."""
    # Join with a short timeout instead of blocking indefinitely so the
    # main thread keeps getting control and can react to SIGINT.
    for watchdog in watchdogs:
        while not watchdogs_terminated and watchdog.is_alive():
            watchdog.join(timeout=THREAD_WAIT_TIMEOUT)
def terminate_watchdogs():
    """Terminate every registered watchdog thread (safe to call more
    than once; runs from atexit and the SIGINT handler)."""
    global watchdogs_terminated  # pylint: disable=global-statement,invalid-name

    for watchdog in watchdogs:
        terminate_watchdog(watchdog)

    # Lets wait_on_watchdogs() stop joining once everything is down.
    watchdogs_terminated = True
def terminate_watchdog(watchdog):
    """Terminate one watchdog thread; return True if it was alive."""
    if not watchdog.is_alive():
        return False

    kutils.print_titled('killing process: ', [kutils.BOLD, kutils.RED], ' '.join(watchdog.cmd), [kutils.BOLD])
    watchdog.terminate()
    return True
# ########## #
# Main Check #
# ########## #
if __name__ == '__main__':
    # Script entry point.
    main()
| eviljoe/junk-n-stuff | src/kalp.py | Python | mit | 5,951 | [
"GULP"
] | 7d6ae9750c6356a53a079799613498f1edbce2d0d1db2e8efa19006b64dd3ecb |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# HasNameOf
#
#-------------------------------------------------------------------------
class RegExpName(Rule):
    """Rule that checks for full or partial name matches"""

    labels = [_('Text:')]
    name = _('People with a name matching <text>')
    description = _("Matches people's names containing a substring or "
                    "matching a regular expression")
    category = _('General filters')
    allow_regex = True

    def apply(self, db, person):
        """Return True if any part of any of the person's names matches."""
        # Check the primary name plus every alternate name.
        names = [person.get_primary_name()] + person.get_alternate_names()
        for name in names:
            fields = (name.first_name, name.get_surname(), name.suffix,
                      name.title, name.nick, name.famnick, name.call)
            if any(self.match_substring(0, field) for field in fields):
                return True
        return False
| beernarrd/gramps | gramps/gen/filters/rules/person/_regexpname.py | Python | gpl-2.0 | 2,199 | [
"Brian"
] | ea5a065a9d1aadd5f2a1ab7285fa65b00fefccef865b61c9e4373382e441de24 |
""" Module that expose the base class for DIRAC Clients.
This class exposes possible RPC calls, given a url of a service.
"""
import ast
from functools import partial
try:
    from functools import partialmethod
except ImportError:
    # Python 2 fallback: functools.partialmethod was added in 3.4, so
    # emulate it on top of functools.partial.
    class partialmethod(partial):
        def __get__(self, instance, owner):
            # Accessing through the class (no instance) yields the
            # descriptor itself; through an instance, bind it as `self`.
            if instance is None:
                return self
            return partial(self.func, instance, *(self.args or ()), **(self.keywords or {}))
import importlib_resources
from DIRAC.Core.Tornado.Client.ClientSelector import RPCClientSelector
from DIRAC.Core.Tornado.Client.TornadoClient import TornadoClient
from DIRAC.Core.Utilities.Extensions import extensionsByPriority
from DIRAC.Core.Utilities.Decorators import deprecated
from DIRAC.Core.DISET import DEFAULT_RPC_TIMEOUT
class partialmethodWithDoc(partialmethod):
    """Extension of meth:`functools.partialmethod` that preserves docstrings"""

    def __get__(self, instance, owner):
        # Bind as usual, then copy this descriptor's docstring onto the
        # bound callable so help()/introspection keep working.
        bound = super(partialmethodWithDoc, self).__get__(instance, owner)
        bound.__doc__ = self.__doc__
        return bound
class Client:
    """Simple class to redirect unknown actions directly to the server. Arguments
    to the constructor are passed to the RPCClient constructor as they are.
    Some of them can however be overwritten at each call (url and timeout).

    - The self.serverURL member should be set by the inheriting class
    """

    # Default https (RPC)Client
    httpsClient = TornadoClient

    def __init__(self, **kwargs):
        """C'tor.

        :param kwargs: just stored as an attribute and passed when creating
                       the RPCClient
        """
        # `url` is consumed here; everything else is forwarded verbatim
        # to the RPCClient when a call is made.
        self.serverURL = kwargs.pop("url", None)
        self.__kwargs = kwargs
        self.timeout = DEFAULT_RPC_TIMEOUT

    def __getattr__(self, name):
        """Store the attribute asked and call executeRPC.
        This means that Client should not be shared between threads !
        """
        # This allows the dir() method to work as well as tab completion in ipython
        if name == "__dir__":
            return super(Client, self).__getattr__()  # pylint: disable=no-member
        # Any unknown attribute becomes a remote procedure call.
        return partial(self.executeRPC, call=name)

    def setServer(self, url):
        """Set the server URL used by default

        :param url: url of the service
        """
        self.serverURL = url

    def getClientKWArgs(self):
        """Returns a copy of the connection arguments"""
        return dict(self.__kwargs)

    def getServer(self):
        """Getter for the server url. Useful ?"""
        return self.serverURL

    @property
    @deprecated("To be removed once we're sure self.call has been removed")
    def call(self):
        # Deprecated attribute kept only to surface misuse loudly.
        raise NotImplementedError("This should be unreachable")

    def executeRPC(self, *parms, **kws):
        """This method extracts some parameters from kwargs that
        are used as parameter of the constructor or RPCClient.
        Unfortunately, only a few of all the available
        parameters of BaseClient are exposed.

        :param rpc: if an RPC client is passed, use that one
        :param timeout: we can change the timeout on a per call bases. Default is self.timeout
        :param url: We can specify which url to use
        """
        toExecute = kws.pop("call")
        # Check whether 'rpc' keyword is specified
        rpc = kws.pop("rpc", False)
        # Check whether the 'timeout' keyword is specified
        timeout = kws.pop("timeout", self.timeout)
        # Check whether the 'url' keyword is specified
        url = kws.pop("url", "")
        # Create the RPCClient
        rpcClient = self._getRPC(rpc=rpc, url=url, timeout=timeout)
        # Execute the method
        return getattr(rpcClient, toExecute)(*parms)

    def _getRPC(self, rpc=None, url="", timeout=None):
        """Return an RPCClient object constructed following the attributes.

        :param rpc: if set, returns this object
        :param url: url of the service. If not set, use self.serverURL
        :param timeout: timeout of the call. If not given, self.timeout will be used
        """
        if not rpc:
            if not url:
                url = self.serverURL

            if not timeout:
                timeout = self.timeout

            # The chosen timeout is folded into the stored kwargs so it
            # is part of the client's connection parameters.
            self.__kwargs["timeout"] = timeout
            rpc = RPCClientSelector(url, httpsClient=self.httpsClient, **self.__kwargs)
        return rpc
def createClient(serviceName):
    """Decorator to expose the service functions automatically in the Client.

    :param str serviceName: system/service. e.g. WorkloadManagement/JobMonitoring
    """
    systemName, handlerName = serviceName.split("/")
    handlerModuleName = handlerName + "Handler"
    # by convention they are the same
    handlerClassName = handlerModuleName
    handlerClassPath = "%sSystem.Service.%s.%s" % (systemName, handlerModuleName, handlerClassName)

    def genFunc(funcName, arguments, handlerClassPath, doc):
        """Create a function with *funcName* taking *arguments*."""
        doc = "" if doc is None else doc
        funcDocString = "%s(%s, **kwargs)\n" % (funcName, ", ".join(arguments))
        # do not describe self or cls in the parameter description
        if arguments and arguments[0] in ("self", "cls"):
            arguments = arguments[1:]

        # Create the actual functions, with or without arguments, **kwargs can be: rpc, timeout, url
        func = partialmethodWithDoc(Client.executeRPC, call=funcName)
        func.__doc__ = funcDocString + doc
        func.__doc__ += "\n\nAutomatically created for the service function "
        func.__doc__ += ":func:`~%s.export_%s`" % (handlerClassPath, funcName)
        # add description for parameters, if that is not already done for the docstring of function in the service
        if arguments and ":param " not in doc:
            func.__doc__ += "\n\n"
            func.__doc__ += "\n".join(":param %s: %s" % (par, par) for par in arguments)
        return func

    def addFunctions(clientCls):
        """Add the functions to the decorated class."""
        attrDict = dict(clientCls.__dict__)
        # Walk extensions in priority order; the handler source is parsed
        # (not imported) so no service-side dependencies are needed.
        for extension in extensionsByPriority():
            try:
                path = importlib_resources.path(
                    "%s.%sSystem.Service" % (extension, systemName),
                    "%s.py" % handlerModuleName,
                )
                fullHandlerClassPath = "%s.%s" % (extension, handlerClassPath)
                with path as fp:
                    handlerAst = ast.parse(fp.read_text(), str(path))
            except (ImportError, OSError):
                # This extension does not provide the handler; try next.
                continue

            # loop over all the nodes (classes, functions, imports) in the handlerModule
            for node in ast.iter_child_nodes(handlerAst):
                # find only a class with the name of the handlerClass
                if not (isinstance(node, ast.ClassDef) and node.name == handlerClassName):
                    continue
                for member in ast.iter_child_nodes(node):
                    # only look at functions
                    if not isinstance(member, ast.FunctionDef):
                        continue
                    if not member.name.startswith("export_"):
                        continue
                    funcName = member.name[len("export_") :]
                    # Do not override methods already present on the class
                    # or added by a higher-priority extension.
                    if funcName in attrDict:
                        continue
                    arguments = [a.arg for a in member.args.args]
                    # add the implementation of the function to the class attributes
                    attrDict[funcName] = genFunc(funcName, arguments, fullHandlerClassPath, ast.get_docstring(member))

        # Rebuild the class with the generated methods included.
        return type(clientCls.__name__, clientCls.__bases__, attrDict)

    return addFunctions
def executeRPCStub(rpcStub):
    """
    Replay a recorded RPC stub against a freshly constructed client.

    :param rpcStub: tuple ((url, callParams), methodName, argTuple) as
                    produced when the original call was recorded
    :return: whatever the remote method returns
    """
    (url, callParams), methodName, methodArgs = rpcStub
    # Rebuild the client configuration that produced the stub, with the
    # original url folded back in.
    clientKwargs = dict(callParams)
    clientKwargs["url"] = url
    replayClient = Client(**clientKwargs)
    # Look up the bound RPC functor and reproduce the original call.
    boundCall = getattr(replayClient, methodName)
    return boundCall(*methodArgs)
| ic-hep/DIRAC | src/DIRAC/Core/Base/Client.py | Python | gpl-3.0 | 8,250 | [
"DIRAC"
] | aa63ab0574fd857badc35e2da0c57e4c7a838ee57086a08c6af79e6462e29816 |
from __future__ import print_function, division
import numpy as np
from numpy import zeros, dot
def omask2wgts_loops(mf, omask, over):
    """Per-state weights of the masked orbital subspace.

    For every (k-point, spin, state) computes
    w[k,s,n] = (omask * c_ksn) . over . c_ksn, where c_ksn is the
    eigenvector mf.mo_coeff[k,s,n,:,0] and over is the overlap matrix.
    """
    weights = zeros(mf.mo_energy.shape)
    for ik in range(mf.nkpoints):
        for ispin in range(mf.nspin):
            for istate in range(mf.norbs):
                vec = mf.mo_coeff[ik, ispin, istate, :, 0]
                masked = omask * vec
                weights[ik, ispin, istate] = dot(dot(masked, over), vec)
    return weights
def gdos(mf, zomegas, omask=None, mat=None, nkpoints=1):
    """ Compute some masked (over atomic orbitals) or total Density of States or any population analysis """
    # Default overlap matrix / mask when the caller does not provide them.
    if mat is None:
        mat = mf.hsx.s4_csr.toarray()
    if omask is None:
        omask = np.ones(mf.norbs)
    weights = omask2wgts_loops(mf, omask, mat)
    # Lorentzian-broadened DOS: Im sum_ksn w_ksn / (z - e_ksn) at each z.
    dos = np.zeros(len(zomegas))
    for iw, zw in enumerate(zomegas):
        dos[iw] = (weights / (zw - mf.mo_energy)).sum().imag
    return -dos / np.pi / nkpoints
def lsoa_dos(mf, zomegas, lsoa=None, nkpoints=1):
    """ Compute the Partial Density of States according to a list of atoms """
    if lsoa is None:
        lsoa = range(mf.natoms)
    # Build an orbital mask selecting every orbital of the requested atoms.
    mask = np.zeros(mf.norbs)
    for atom in lsoa:
        mask[mf.atom2s[atom]:mf.atom2s[atom + 1]] = 1.0
    # Overlap matrix source depends on how mf was built (siesta vs pyscf).
    if hasattr(mf, 'hsx'):
        over = mf.hsx.s4_csr.toarray()
    else:
        over = mf.overlap_lil().toarray()
    return gdos(mf, zomegas, mask, over, nkpoints)
def pdos(mf, zomegas, nkpoints=1):
    """ Compute the Partial Density of States (resolved in angular momentum of the orbitals) using the eigenvalues and eigenvectors in wfsx """
    jmx = mf.ao_log.jmx
    # Overlap matrix source depends on how mf was built (siesta vs pyscf).
    if hasattr(mf, 'hsx'):
        over = mf.hsx.s4_csr.toarray()
    else:
        over = mf.overlap_lil().toarray()
    orb2j = mf.get_orb2j()
    # One DOS row per angular momentum channel j = 0..jmx.
    result = np.zeros((jmx + 1, len(zomegas)))
    for j in range(jmx + 1):
        channel_mask = (orb2j == j)
        result[j] = gdos(mf, zomegas, channel_mask, over, nkpoints)
    return result
#
# Example of plotting DOS calculated by GW calculation.
#
# Example driver: UHF on CN, GW on top, then total/partial/local DOS plots.
if __name__=='__main__':
import numpy as np
import matplotlib.pyplot as plt
from pyscf import gto, scf
from pyscf.nao import gw as gw_c
mol = gto.M( verbose = 0, atom = '''C 0.0, 0.0, -0.611046 ; N 0.0, 0.0, 0.523753''', basis = 'cc-pvdz', spin=1, charge=0)
gto_mf_UHF = scf.UHF(mol)
gto_mf_UHF.kernel()
gw = gw_c(mf=gto_mf_UHF, gto=mol, verbosity=1, niter_max_ev=20)
# complex frequency grid (Ha) with 0.01 Ha Lorentzian broadening
omegas = np.arange(-1.0, 1.0, 0.005)+1j*0.01
dos= lsoa_dos(mf=gw, zomegas=omegas)
# NOTE(review): this rebinds the name 'pdos' from the function to its
# result, so pdos() cannot be called again after this line.
pdos= pdos(mf=gw, zomegas=omegas)
data=np.zeros((pdos.shape[0]+2, pdos.shape[1]))
# 27.2114 converts Hartree to eV
data[0,:] = omegas.real*27.2114
data[1, :] = dos.clip(min=0)
data[2:, :] = pdos.clip(min=0)
# NOTE(review): the header assumes exactly s/p/d channels (jmx == 2);
# a basis with higher jmx would add unlabelled columns -- confirm.
np.savetxt('dos.dat', data.T, fmt='%14.6f', header=' Energy(eV)\t Total DOS\t s_state\t p_state\t d_state')
#plotting DOS and PDOS
x = data.T [:,0] #Energies
y1 = data.T [:,1] #Total DOS
y2 = data.T [:,2] #s_state
y3 = data.T [:,3] #p_state
y4 = data.T [:,4] #d_state
plt.plot(x, y1, label='Total DOS')
plt.plot(x, y2, label='s_state')
plt.plot(x, y3, label='p_state')
plt.plot(x, y4, label='d_state')
plt.axvline(x=gw.fermi_energy*27.2114,color='k', linestyle='--', label='Fermi Energy')
plt.title('DOS', fontsize=20)
plt.xlabel('Energy (eV)', fontsize=15)
plt.ylabel('Density of States (electron/eV)', fontsize=15)
plt.legend()
plt.show()
#plots DOS for each atoms
for i in range (gw.natoms):
local_dos= lsoa_dos(mf=gw, zomegas=omegas,lsoa=[i])
data[1,:] = local_dos.clip(min=0)
# NOTE(review): sp2symbol is indexed here by atom index -- presumably it
# maps species, not atoms; verify the intended lookup (atom2sp?).
plt.plot(x, data.T [:,1], label='Local DOS of atom '+gw.sp2symbol[i])
plt.xlabel('Energy (eV)', fontsize=15)
plt.axvline(x=gw.fermi_energy*27.2114,color='k', linestyle='--', label='Fermi Energy')
plt.ylabel('Local Density of States (electron/eV)', fontsize=12)
plt.legend()
plt.show()
| gkc1000/pyscf | pyscf/nao/m_dos_pdos_ldos.py | Python | apache-2.0 | 3,808 | [
"PySCF"
] | c2d4fbc65494037c654cad5c9f0f73d8caab8d1e18d228347d682bad51287c7c |
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import plistlib
import os
import numpy
import vtk
import wx
import wx.lib.pubsub as ps
import constants as const
import project as prj
from data import vtk_utils
from vtk.util import numpy_support
# 5x5 convolution kernels applied to the image before raycasting; values are
# divided by 60.0 (the kernel sum) in ApplyConvolution to normalise them.
Kernels = {
"Basic Smooth 5x5" : [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 4.0, 4.0, 4.0, 1.0,
1.0, 4.0, 12.0, 4.0, 1.0,
1.0, 4.0, 4.0, 4.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
}
# Named lighting presets applied to vtkVolumeProperty in Volume.SetShading.
SHADING = {
"Default": {
"ambient" :0.15,
"diffuse" :0.9,
"specular" :0.3,
"specularPower" :15,
},
"Glossy Vascular":{
"ambient" :0.15,
"diffuse" :0.28,
"specular" :1.42,
"specularPower" :50,
},
"Glossy Bone": {
"ambient" :0.15,
"diffuse" :0.24,
"specular" :1.17,
"specularPower" :6.98,
},
"Endoscopy": {
"ambient" :0.12,
"diffuse" :0.64,
"specular" :0.73,
"specularPower" :50,
}
}
# Manages the raycast volume rendering: builds the VTK volume pipeline from
# the project's raycasting preset and reacts to pubsub events from the GUI.
class Volume():
def __init__(self):
self.config = None            # raycasting preset dict (loaded lazily)
self.exist = None             # truthy once the VTK volume was built
self.color_transfer = None    # cached vtkColorTransferFunction
self.opacity_transfer_func = None  # cached vtkPiecewiseFunction
self.ww = None                # current window width
self.wl = None                # current window level
self.curve = 0                # index of the selected 16-bit CLUT curve
self.plane = None             # CutPlane instance, created on demand
self.plane_on = False         # whether the cut plane tool is active
self.volume = None            # the vtkVolume actor
self.__bind_events()
# Subscribe every GUI-facing handler to its pubsub topic.
def __bind_events(self):
ps.Publisher().subscribe(self.OnHideVolume,
'Hide raycasting volume')
ps.Publisher().subscribe(self.OnUpdatePreset,
'Update raycasting preset')
ps.Publisher().subscribe(self.OnSetCurve,
'Set raycasting curve')
ps.Publisher().subscribe(self.OnSetWindowLevel,
'Set raycasting wwwl')
ps.Publisher().subscribe(self.Refresh,
'Set raycasting refresh')
ps.Publisher().subscribe(self.OnSetRelativeWindowLevel,
'Set raycasting relative window and level')
ps.Publisher().subscribe(self.OnEnableTool,
'Enable raycasting tool')
ps.Publisher().subscribe(self.OnCloseProject, 'Close project data')
ps.Publisher().subscribe(self.ChangeBackgroundColour,
'Change volume viewer background colour')
# Pubsub wrapper around CloseProject.
def OnCloseProject(self, pubsub_evt):
self.CloseProject()
# Tear down the cut plane and the volume actor when the project closes.
def CloseProject(self):
#if self.plane:
# self.plane = None
# ps.Publisher().sendMessage('Remove surface actor from viewer', self.plane_actor)
if self.plane:
self.plane.DestroyObjs()
del self.plane
# NOTE(review): reset to 0 here although __init__ uses None -- both are
# falsy, so the `if self.plane:` checks still behave the same.
self.plane = 0
if self.exist:
self.exist = None
ps.Publisher().sendMessage('Remove surface actor from viewer', self.volume)
ps.Publisher().sendMessage('Disable volume cut menu')
# Pubsub handler: (re)load the volume for the preset named in the event.
def OnLoadVolume(self, pubsub_evt):
label = pubsub_evt.data
#self.LoadConfig(label)
self.LoadVolume()
# Hide the volume (and its cut plane, if active) without destroying it.
def OnHideVolume(self, pubsub_evt):
self.volume.SetVisibility(0)
if (self.plane and self.plane_on):
self.plane.Disable()
ps.Publisher().sendMessage('Render volume viewer')
def OnShowVolume(self, pubsub_evt):
    """Show the raycast volume, building it first if it does not exist."""
    if self.exist:
        self.volume.SetVisibility(1)
        if (self.plane and self.plane_on):
            self.plane.Enable()
        ps.Publisher().sendMessage('Render volume viewer')
    else:
        # Bug fix: Publisher is the singleton factory -- it must be
        # instantiated before sendMessage, as at every other call site in
        # this module; the class-level call raised a TypeError.
        ps.Publisher().sendMessage('Load raycasting preset', const.RAYCASTING_LABEL)
        # NOTE(review): LoadConfig is not defined on this class in this
        # file (only __load_preset_config is) -- confirm where it comes from.
        self.LoadConfig()
        self.LoadVolume()
        self.exist = 1
def OnUpdatePreset(self, pubsub_evt):
    """Reload the raycasting preset and refresh (or first build) the volume."""
    self.__load_preset_config()
    if self.config:
        if self.exist:
            self.__load_preset()
            self.volume.SetVisibility(1)
            #ps.Publisher().sendMessage('Render volume viewer')
        else:
            self.LoadVolume()
            self.CalculateHistogram()
            self.exist = 1
        colour = self.GetBackgroundColour()
        # Bug fix: Publisher must be instantiated before sendMessage (the
        # class-level call form raised a TypeError under wx.lib.pubsub v1);
        # matches every other call site in this module.
        ps.Publisher().sendMessage('Change volume viewer background colour', colour)
        ps.Publisher().sendMessage('Change volume viewer gui colour', colour)
# Pull the current raycasting preset dict from the project singleton.
def __load_preset_config(self):
self.config = prj.Project().raycasting_preset
# Rebuild colour/opacity transfer functions for the current preset mode.
def __update_colour_table(self):
if self.config['advancedCLUT']:
self.Create16bColorTable(self.scale)
self.CreateOpacityTable(self.scale)
else:
self.Create8bColorTable(self.scale)
self.Create8bOpacityTable(self.scale)
# Re-apply every preset-dependent piece of the existing pipeline.
def __load_preset(self):
# Update colour table
self.__update_colour_table()
# Update convolution filter
original_imagedata = self.imagedata.GetOutput()
imagedata = self.ApplyConvolution(original_imagedata)
self.volume_mapper.SetInput(imagedata)
# Update other information
self.SetShading()
self.SetTypeRaycasting()
# Pubsub handler: select a CLUT curve and report its ww/wl to the GUI.
def OnSetCurve(self, pubsub_evt):
self.curve = pubsub_evt.data
self.CalculateWWWL()
ww = self.ww
wl = self.wl
ps.Publisher().sendMessage('Set volume window and level text',
(ww, wl))
# Pubsub handler: apply a delta to the current window width/level.
def OnSetRelativeWindowLevel(self, pubsub_evt):
diff_wl, diff_ww = pubsub_evt.data
ww = self.ww + diff_ww
wl = self.wl + diff_wl
ps.Publisher().sendMessage('Set volume window and level text',
(ww, wl))
self.SetWWWL(ww, wl)
self.ww = ww
self.wl = wl
# Pubsub handler: set absolute ww/wl on curve n.
def OnSetWindowLevel(self, pubsub_evt):
ww, wl, n = pubsub_evt.data
self.curve = n
self.SetWWWL(ww,wl)
# Apply a window width/level either by shifting/stretching the selected
# advanced CLUT curve in place, or by writing ww/wl into the simple preset.
def SetWWWL(self, ww, wl):
if self.config['advancedCLUT']:
try:
curve = self.config['16bitClutCurves'][self.curve]
except IndexError:
self.curve = 0
curve = self.config['16bitClutCurves'][self.curve]
# current curve extent and centre on the gray-level axis
p1 = curve[0]
p2 = curve[-1]
half = (p2['x'] - p1['x']) / 2.0
middle = p1['x'] + half
# how far the centre must move, and how much each end must stretch
shiftWL = wl - middle
shiftWW = p1['x'] + shiftWL - (wl - 0.5 * ww)
factor = 1.0
for n,i in enumerate(curve):
# stretch proportionally to the distance from the curve centre
factor = abs(i['x'] - middle) / half
if factor < 0:
factor = 0
i['x'] += shiftWL
# left half moves left, right half moves right, widening the curve
if n < len(curve)/2.0:
i['x'] -= shiftWW * factor
else:
i['x'] += shiftWW * factor
else:
self.config['wl'] = wl
self.config['ww'] = ww
self.__update_colour_table()
def CalculateWWWL(self):
"""
Get the window width & level from the selected curve
"""
try:
curve = self.config['16bitClutCurves'][self.curve]
except IndexError:
# selected curve disappeared -- fall back to the previous one
self.curve -= 1
curve = self.config['16bitClutCurves'][self.curve]
first_point = curve[0]['x']
last_point = curve[-1]['x']
self.ww = last_point - first_point
self.wl = first_point + self.ww / 2.0
# Pubsub handler: rebuild the transfer functions from the current config.
def Refresh(self, pubsub_evt):
self.__update_colour_table()
# Build (or refill) the colour transfer function from the advanced 16-bit
# CLUT curves; gray levels are shifted by TranslateScale into VTK's range.
def Create16bColorTable(self, scale):
if self.color_transfer:
color_transfer = self.color_transfer
else:
color_transfer = vtk.vtkColorTransferFunction()
color_transfer.RemoveAllPoints()
curve_table = self.config['16bitClutCurves']
color_table = self.config['16bitClutColors']
colors = []
for i, l in enumerate(curve_table):
for j, lopacity in enumerate(l):
gray_level = lopacity['x']
r = color_table[i][j]['red']
g = color_table[i][j]['green']
b = color_table[i][j]['blue']
colors.append((gray_level, r, g, b))
color_transfer.AddRGBPoint(
self.TranslateScale(scale, gray_level),
r, g, b)
self.color_transfer = color_transfer
# Build the colour transfer function from a named 8-bit colour-list preset,
# spreading its colours linearly across the ww/wl window.
def Create8bColorTable(self, scale):
if self.color_transfer:
color_transfer = self.color_transfer
else:
color_transfer = vtk.vtkColorTransferFunction()
color_transfer.RemoveAllPoints()
color_preset = self.config['CLUT']
# NOTE(review): when color_preset == "No CLUT", `colors` is presumably
# never defined; whether the code below is inside this if-block cannot
# be told from this (indentation-stripped) dump -- verify upstream.
if color_preset != "No CLUT":
p = plistlib.readPlist(
os.path.join(const.RAYCASTING_PRESETS_DIRECTORY,
'color_list', color_preset + '.plist'))
r = p['Red']
g = p['Green']
b = p['Blue']
colors = zip(r,g,b)
ww = self.config['ww']
wl = self.TranslateScale(scale, self.config['wl'])
init = wl - ww/2.0
inc = ww / (len(colors) - 1.0)
for n,rgb in enumerate(colors):
# plist stores 0-255 components; VTK expects 0.0-1.0
color_transfer.AddRGBPoint(init + n * inc, *[i/255.0 for i in rgb])
self.color_transfer = color_transfer
# Build the opacity function from the advanced CLUT curves; everything
# outside the curves stays fully transparent (the AddSegment baseline).
def CreateOpacityTable(self, scale):
if self.opacity_transfer_func:
opacity_transfer_func = self.opacity_transfer_func
else:
opacity_transfer_func = vtk.vtkPiecewiseFunction()
opacity_transfer_func.RemoveAllPoints()
curve_table = self.config['16bitClutCurves']
opacities = []
ww = self.config['ww']
wl = self.config['wl']
self.ww = ww
self.wl = wl
l1 = wl - ww/2.0
l2 = wl + ww/2.0
k1 = 0.0
k2 = 1.0
# transparent baseline across the whole unsigned-short range
opacity_transfer_func.AddSegment(0, 0, 2**16-1, 0)
for i, l in enumerate(curve_table):
for j, lopacity in enumerate(l):
gray_level = lopacity['x']
#if gray_level <= l1:
# opacity = k1
#elif gray_level > l2:
# opacity = k2
#else:
opacity = lopacity['y']
opacities.append((gray_level, opacity))
opacity_transfer_func.AddPoint(
self.TranslateScale(scale, gray_level), opacity)
self.opacity_transfer_func = opacity_transfer_func
# Simple-mode opacity: a linear ramp from 0 at (wl - ww/2) to 1 at
# (wl + ww/2) over a transparent baseline.
def Create8bOpacityTable(self, scale):
if self.opacity_transfer_func:
opacity_transfer_func = self.opacity_transfer_func
else:
opacity_transfer_func = vtk.vtkPiecewiseFunction()
opacity_transfer_func.RemoveAllPoints()
opacities = []
ww = self.config['ww']
wl = self.TranslateScale(scale, self.config['wl'])
l1 = wl - ww/2.0
l2 = wl + ww/2.0
self.ww = ww
self.wl = self.config['wl']
# (second RemoveAllPoints is redundant but harmless)
opacity_transfer_func.RemoveAllPoints()
opacity_transfer_func.AddSegment(0, 0, 2**16-1, 0)
k1 = 0.0
k2 = 1.0
opacity_transfer_func.AddPoint(l1, 0)
opacity_transfer_func.AddPoint(l2, 1)
self.opacity_transfer_func = opacity_transfer_func
return opacity_transfer_func
def GetBackgroundColour(self):
    """Return the preset's background colour as an (r, g, b) tuple."""
    cfg = self.config
    return (cfg['backgroundColorRedComponent'],
            cfg['backgroundColorGreenComponent'],
            cfg['backgroundColorBlueComponent'])
def ChangeBackgroundColour(self, pubsub_evt):
    """Store a new viewer background colour (0-1 floats) into the preset
    as 0-255 components. Ignored when no preset is loaded."""
    if not self.config:
        return
    red, green, blue = pubsub_evt.data[0], pubsub_evt.data[1], pubsub_evt.data[2]
    self.config['backgroundColorRedComponent'] = red * 255
    self.config['backgroundColorGreenComponent'] = green * 255
    self.config['backgroundColorBlueComponent'] = blue * 255
# NOTE(review): BuildTable takes no self and references names `p` and
# `color_table` that are not defined in any scope visible here -- it looks
# like dead code carried over from an older preset-parsing routine; calling
# it would raise NameError. Confirm before removing.
def BuildTable():
curve_table = p['16bitClutCurves']
color_background = (p['backgroundColorRedComponent'],
p['backgroundColorGreenComponent'],
p['backgroundColorBlueComponent'])
color_background = [i for i in color_background]
opacities = []
colors = []
for i, l in enumerate(curve_table):
for j, lopacity in enumerate(l):
gray_level = lopacity['x']
opacity = lopacity['y']
opacities.append((gray_level, opacity))
r = color_table[i][j]['red']
g = color_table[i][j]['green']
b = color_table[i][j]['blue']
colors.append((gray_level, r, g, b))
return colors, opacities, color_background, p['useShading']
# Apply the preset's shading flag and named lighting parameters (SHADING).
def SetShading(self):
if self.config['useShading']:
self.volume_properties.ShadeOn()
else:
self.volume_properties.ShadeOff()
shading = SHADING[self.config['shading']]
self.volume_properties.SetAmbient(shading['ambient'])
self.volume_properties.SetDiffuse(shading['diffuse'])
self.volume_properties.SetSpecular(shading['specular'])
self.volume_properties.SetSpecularPower(shading['specularPower'])
# Select MIP vs composite blending, via the API matching the mapper type.
def SetTypeRaycasting(self):
if self.volume_mapper.IsA("vtkFixedPointVolumeRayCastMapper"):
if self.config.get('MIP', False):
self.volume_mapper.SetBlendModeToMaximumIntensity()
else:
self.volume_mapper.SetBlendModeToComposite()
else:
if self.config.get('MIP', False):
raycasting_function = vtk.vtkVolumeRayCastMIPFunction()
else:
raycasting_function = vtk.vtkVolumeRayCastCompositeFunction()
raycasting_function.SetCompositeMethodToInterpolateFirst()
self.volume_mapper.SetVolumeRayCastFunction(raycasting_function)
# Chain the preset's convolution filters onto imagedata; returns the output
# of the last filter (or the input unchanged when no filters are configured).
def ApplyConvolution(self, imagedata, update_progress = None):
number_filters = len(self.config['convolutionFilters'])
if number_filters:
if not(update_progress):
update_progress = vtk_utils.ShowProgress(number_filters)
# NOTE(review): `filter` shadows the builtin; rename when touching this.
for filter in self.config['convolutionFilters']:
convolve = vtk.vtkImageConvolve()
convolve.SetInput(imagedata)
# kernels are normalised by their sum (60.0)
convolve.SetKernel5x5([i/60.0 for i in Kernels[filter]])
convolve.AddObserver("ProgressEvent", lambda obj,evt:
update_progress(convolve, "Rendering..."))
# NOTE(review): no Update() here -- presumably the lazy VTK pipeline
# executes when the downstream mapper updates; confirm.
imagedata = convolve.GetOutput()
#convolve.GetOutput().ReleaseDataFlagOn()
return imagedata
# Build the full VTK raycasting pipeline from the project image:
# optional Y-flip -> shift to unsigned short -> transfer functions ->
# convolution -> mapper/property/volume, then hand the volume to the viewer.
def LoadVolume(self):
proj = prj.Project()
image = proj.imagedata
number_filters = len(self.config['convolutionFilters'])
# axial datasets are stored upside-down with respect to the viewer
if (prj.Project().original_orientation == const.AXIAL):
flip_image = True
else:
flip_image = False
if (flip_image):
update_progress= vtk_utils.ShowProgress(2 + number_filters)
# Flip original vtkImageData
flip = vtk.vtkImageFlip()
flip.SetInput(image)
flip.SetFilteredAxis(1)
flip.FlipAboutOriginOn()
flip.AddObserver("ProgressEvent", lambda obj,evt:
update_progress(flip, "Rendering..."))
flip.Update()
image = flip.GetOutput()
else:
update_progress= vtk_utils.ShowProgress(1 + number_filters)
scale = image.GetScalarRange()
self.scale = scale
# shift scalars so the minimum becomes 0 and store as unsigned short
cast = vtk.vtkImageShiftScale()
cast.SetInput(image)
cast.SetShift(abs(scale[0]))
cast.SetOutputScalarTypeToUnsignedShort()
cast.AddObserver("ProgressEvent", lambda obj,evt:
update_progress(cast, "Rendering..."))
cast.Update()
# NOTE(review): image2 is the filter object here, not its output; the
# output is taken below via image2.GetOutput().
image2 = cast
self.imagedata = image2
if self.config['advancedCLUT']:
self.Create16bColorTable(scale)
self.CreateOpacityTable(scale)
else:
self.Create8bColorTable(scale)
self.Create8bOpacityTable(scale)
image2 = self.ApplyConvolution(image2.GetOutput(), update_progress)
self.final_imagedata = image2
# Changed the vtkVolumeRayCast to vtkFixedPointVolumeRayCastMapper
# because it's faster and the image is better
# TODO: To test if it's true.
if const.TYPE_RAYCASTING_MAPPER:
volume_mapper = vtk.vtkVolumeRayCastMapper()
#volume_mapper.AutoAdjustSampleDistancesOff()
#volume_mapper.SetInput(image2)
#volume_mapper.SetVolumeRayCastFunction(composite_function)
#volume_mapper.SetGradientEstimator(gradientEstimator)
volume_mapper.IntermixIntersectingGeometryOn()
self.volume_mapper = volume_mapper
else:
volume_mapper = vtk.vtkFixedPointVolumeRayCastMapper()
#volume_mapper.AutoAdjustSampleDistancesOff()
self.volume_mapper = volume_mapper
volume_mapper.IntermixIntersectingGeometryOn()
self.SetTypeRaycasting()
volume_mapper.SetInput(image2)
# TODO: Look to this
#volume_mapper_hw = vtk.vtkVolumeTextureMapper3D()
#volume_mapper_hw.SetInput(image2)
#Cut Plane
#CutPlane(image2, volume_mapper)
#self.color_transfer = color_transfer
volume_properties = vtk.vtkVolumeProperty()
#volume_properties.IndependentComponentsOn()
volume_properties.SetInterpolationTypeToLinear()
volume_properties.SetColor(self.color_transfer)
try:
volume_properties.SetScalarOpacity(self.opacity_transfer_func)
except NameError:
pass
# Using these lines to improve the raycasting quality. These values
# seems related to the distance from ray from raycasting.
# TODO: Need to see values that improve the quality and don't decrease
# the performance. 2.0 seems to be a good value to pix_diag
pix_diag = 2.0
volume_mapper.SetImageSampleDistance(0.25)
volume_mapper.SetSampleDistance(pix_diag / 5.0)
volume_properties.SetScalarOpacityUnitDistance(pix_diag)
self.volume_properties = volume_properties
self.SetShading()
volume = vtk.vtkVolume()
volume.SetMapper(volume_mapper)
volume.SetProperty(volume_properties)
self.volume = volume
colour = self.GetBackgroundColour()
ps.Publisher().sendMessage('Load volume into viewer',
(volume, colour, (self.ww, self.wl)))
# Pubsub handler: toggle a named tool; currently only the cut plane, which
# is created lazily on first enable.
def OnEnableTool(self, pubsub_evt):
tool_name, enable = pubsub_evt.data
if tool_name == _("Cut plane"):
if self.plane:
if enable:
self.plane_on = True
self.plane.Enable()
else:
self.plane_on = False
self.plane.Disable()
else:
self.final_imagedata.Update()
self.plane_on = True
self.plane = CutPlane(self.final_imagedata,
self.volume_mapper)
# Histogram of the project image's scalar values, published for the GUI.
def CalculateHistogram(self):
proj = prj.Project()
image = proj.imagedata
# one histogram bin per scalar value across the full range
r = int(image.GetScalarRange()[1] - image.GetScalarRange()[0])
accumulate = vtk.vtkImageAccumulate()
accumulate.SetInput(image)
accumulate.SetComponentExtent(0, r -1, 0, 0, 0, 0)
accumulate.SetComponentOrigin(image.GetScalarRange()[0], 0, 0)
accumulate.Update()
n_image = numpy_support.vtk_to_numpy(accumulate.GetOutput().GetPointData().GetScalars())
ps.Publisher().sendMessage('Load histogram', (n_image,
image.GetScalarRange()))
def TranslateScale(self, scale, value):
    """Shift *value* so that the scalar range *scale* starts at zero.

    Matches the unsigned-short conversion done by vtkImageShiftScale in
    LoadVolume (which shifts the data by -scale[0]).
    """
    range_minimum = scale[0]
    return value - range_minimum
# Interactive clipping plane for the raycast volume: a vtkImagePlaneWidget
# drives a vtkPlane registered as a clipping plane on the volume mapper.
class CutPlane:
def __init__(self, img, volume_mapper):
self.img = img                      # vtkImageData being rendered
self.volume_mapper = volume_mapper  # mapper to attach the clip plane to
self.Create()
self.__bind_events()
# Subscribe the plane's pubsub handlers.
def __bind_events(self):
ps.Publisher().subscribe(self.Reset,
'Reset Cut Plane')
ps.Publisher().subscribe(self.Enable,
'Enable Cut Plane')
ps.Publisher().subscribe(self.Disable,
'Disable Cut Plane')
# Build the widget, the visible plane actor, and the clipping vtkPlane,
# then remember the initial pose so Reset can restore it.
def Create(self):
self.plane_widget = plane_widget = vtk.vtkImagePlaneWidget()
plane_widget.SetInput(self.img)
plane_widget.SetPlaneOrientationToXAxes()
#plane_widget.SetResliceInterpolateToLinear()
plane_widget.TextureVisibilityOff()
#Set left mouse button to move and rotate plane
plane_widget.SetLeftButtonAction(1)
#SetColor margin to green
margin_property = plane_widget.GetMarginProperty()
margin_property.SetColor(0,0.8,0)
#Disable cross
cursor_property = plane_widget.GetCursorProperty()
cursor_property.SetOpacity(0)
self.plane_source = plane_source = vtk.vtkPlaneSource()
plane_source.SetOrigin(plane_widget.GetOrigin())
plane_source.SetPoint1(plane_widget.GetPoint1())
plane_source.SetPoint2(plane_widget.GetPoint2())
plane_source.SetNormal(plane_widget.GetNormal())
plane_mapper = self.plane_mapper = vtk.vtkPolyDataMapper()
plane_mapper.SetInput(plane_source.GetOutput())
self.plane_actor = plane_actor = vtk.vtkActor()
plane_actor.SetMapper(plane_mapper)
plane_actor.GetProperty().BackfaceCullingOn()
plane_actor.GetProperty().SetOpacity(0)
plane_widget.AddObserver("InteractionEvent", self.Update)
ps.Publisher().sendMessage('AppendActor', self.plane_actor)
ps.Publisher().sendMessage('Set Widget Interactor', self.plane_widget)
plane_actor.SetVisibility(1)
plane_widget.On()
self.plane = plane = vtk.vtkPlane()
plane.SetNormal(self.plane_source.GetNormal())
plane.SetOrigin(self.plane_source.GetOrigin())
self.volume_mapper.AddClippingPlane(plane)
#Storage First Position
self.origin = plane_widget.GetOrigin()
self.p1 = plane_widget.GetPoint1()
self.p2 = plane_widget.GetPoint2()
self.normal = plane_widget.GetNormal()
# Widget interaction callback (observer signature: obj, event): mirror the
# widget pose onto the plane source and the clipping plane, then re-render.
def Update(self, a, b):
plane_source = self.plane_source
plane_widget = self.plane_widget
plane_source.SetOrigin(plane_widget.GetOrigin())
plane_source.SetPoint1(plane_widget.GetPoint1())
plane_source.SetPoint2(plane_widget.GetPoint2())
plane_source.SetNormal(plane_widget.GetNormal())
self.plane_actor.VisibilityOn()
self.plane.SetNormal(plane_source.GetNormal())
self.plane.SetOrigin(plane_source.GetOrigin())
ps.Publisher().sendMessage('Render volume viewer', None)
# Turn the widget back on and re-attach the clipping plane to the mapper.
def Enable(self, evt_pubsub=None):
self.plane_widget.On()
self.plane_actor.VisibilityOn()
self.volume_mapper.AddClippingPlane(self.plane)
ps.Publisher().sendMessage('Render volume viewer', None)
# Hide the widget and stop clipping the volume.
def Disable(self,evt_pubsub=None):
self.plane_widget.Off()
self.plane_actor.VisibilityOff()
self.volume_mapper.RemoveClippingPlane(self.plane)
ps.Publisher().sendMessage('Render volume viewer', None)
# Restore the pose captured at Create() time.
def Reset(self, evt_pubsub=None):
plane_source = self.plane_source
plane_widget = self.plane_widget
plane_source.SetOrigin(self.origin)
plane_source.SetPoint1(self.p1)
plane_source.SetPoint2(self.p2)
plane_source.SetNormal(self.normal)
self.plane_actor.VisibilityOn()
self.plane.SetNormal(self.normal)
self.plane.SetOrigin(self.origin)
ps.Publisher().sendMessage('Render volume viewer', None)
# Remove the actor from the viewer, detach the clipping plane, and drop
# references so VTK objects can be released.
def DestroyObjs(self):
ps.Publisher().sendMessage('Remove surface actor from viewer', self.plane_actor)
self.Disable()
del self.plane_widget
del self.plane_source
del self.plane_actor
del self.normal
del self.plane
| tatiana/invesalius | invesalius/data/volume.py | Python | gpl-2.0 | 25,571 | [
"VTK"
] | 96e1b2ec444a592c3a1647fa93900587f632ca3bc5ff9bf1e399e007aaf04728 |
#!/usr/bin/env python
import os, sys, time, socket, getopt, getpass
import Pyro.core
import shutil
import time
import getopt
# from Bio.Blast import NCBIStandalone
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastpCommandline
from StringIO import StringIO
#from Bio import Fasta
from types import *
class options:
def __init__(self, argv):
try:
opts, args = getopt.getopt(argv, "hpn:u:a:d:", ["help", "password", "nsname=", "user=", "app-dir=", "data-dir="])
except getopt.GetoptError:
print 'error running getopt.getopt'
self.usage()
self.argDict = {}
for opt, arg in opts:
if opt in ("-h", "--help"):
self.usage()
sys.exit()
elif opt in ("-p", "--password"):
self.argDict['password'] = getpass.getpass('password: ')
elif opt in ("-n", "--nsname"):
self.argDict['nsname'] = arg
elif opt in ("-u", "--user"):
self.argDict['user'] = arg
elif opt in ("-a", "--app-dir"):
self.argDict['app-dir'] = arg
elif opt in ("-d", "--data-dir"):
self.argDict['data-dir'] = arg
if not self.argDict.has_key('password'): self.argDict['password'] = ''
required_args = ('nsname', 'user', 'app-dir', 'data-dir')
for a in required_args:
if a not in self.argDict:
print "required argument '%s' is missing" % a
self.usage()
sys.exit()
def usage(self):
'''Prints program usage information'''
print """blastclient.py [OPTION] [ARGUMENT]
-h, --help: print this usage information
-u, --user=<username>: specify a username on the database
-p, --password: prompt for a password
-a, --app-dir: location where BLAST is installed
-d, --data-dir: location where fasta database should be stored
-n, --nsname: PYRO NS name, required"""
class blast:
def __init__(self, blastDataDir='/tmp/BLAST', blastAppDir='~/Applications/BLAST/bin/'):
if not os.path.isdir(blastDataDir):
print "data directory '%s' doesn't exist. creating it..." % blastDataDir
os.mkdir(blastDataDir)
print "checking for 'formatdb' in '%s' " % blastDataDir
if os.path.exists(os.path.join(blastDataDir, 'formatdb')): print 'yes'
else:
print 'no'
shutil.copy(os.path.join(blastAppDir,'formatdb'), blastDataDir)
print "copying 'formatdb' to '%s'" % blastDataDir
self.blastDataDir, self.blastAppDir = blastDataDir, blastAppDir
def blast(self):
'''aligns sequences using blast'''
blastAppDir = self.blastAppDir
blastDB = os.path.join(self.blastDataDir, 'blastDB.fasta')
blastQueryFile = os.path.join(self.blastDataDir, 'filetoblast.txt')
print 'path to filetoblast.txt:', blastQueryFile
if sys.platform == 'win32':
blastall_name = 'Blastall.exe'
else:
blastall_name = 'blastall'
blast_exe = os.path.join(blastAppDir, blastall_name)
if sys.platform == 'win32':
import win32api
blastDB = win32api.GetShortPathName(blast_db)
blastQueryFile = win32api.GetShortPathName(blastQueryFile)
blast_exe = win32api.GetShortPathName(blast_exe)
# blast_out, error_info = NCBIStandalone.blastall(blast_exe, 'blastp', blastDB, blastQueryFile, align_view=7)
blast_out = NcbiblastpCommandline(query=blastQueryFile, db=blastDB, outfmt=5)()[0]
#print error_info.read()
#print blast_out.read()
blast_records = NCBIXML.parse(StringIO(blast_out))
results = []
recordnumber = 0
nonmatchingQueries = []
while 1:
recordnumber += 1
try: b_record = blast_records.next()
except StopIteration: break
if not b_record:
continue
print 'query:', b_record.query
e_value_thresh = 0.0001
significant = False
for alignment in b_record.alignments:
bestHsp = None
for hsp in alignment.hsps:
if not bestHsp: bestHsp = hsp.expect
elif bestHsp < hsp.expect: continue
if hsp.expect < e_value_thresh:
alignment.title = alignment.title.replace(">","")
#if b_record.query != alignment.title:
#print 'dir(alignment):', dir(alignment)
#print 'hsps: ',alignment.hsps, 'accession:', alignment.accession, 'title:', alignment.title, 'length:', alignment.length
if b_record.query != alignment.accession:
significant = True
print 'adding', b_record.query, 'and', alignment.accession, 'to matches (e value: ',hsp.expect, ', bit score: ', hsp.bits, ')'
results.append((b_record.query, alignment.accession, hsp.expect, hsp.bits))
print b_record.query, significant
#if not significant:
# print 'adding', b_record.query, 'to the list of queries without matches'
# results.append((b_record.query, None, None))
return results
def get_blastWorkUnit(self, blastDataDir):
print 'getting blast work unit...'
self.blastWorkUnit = self.phamServer.request_seqs(self.client)
if hasattr(self.blastWorkUnit, 'database'): # Practical test to see if there is work in the work unit
self.write_blast_db()
self.write_blast_query()
return True
else:
return False
def write_blast_db(self):
print 'writing work unit to file...'
f = open(os.path.join(self.blastDataDir, 'blastDB.fasta'), 'w')
f.write(self.blastWorkUnit.get_as_fasta())
f.close()
if sys.platform == 'win32':
formatdb = 'formatdb.exe '
else:
formatdb = 'formatdb'
os.system(os.path.join(self.blastDataDir, formatdb) + ' -i ' + os.path.join(self.blastDataDir, 'blastDB.fasta -o T'))
def write_blast_query(self):
print 'getting query sequence from the server'
f = open(os.path.join(self.blastDataDir, 'filetoblast.txt'), 'w')
f.write('>%s\n%s\n' % (self.blastWorkUnit.query_id, self.blastWorkUnit.query_translation))
f.close()
def main(argv):
opts = options(sys.argv[1:]).argDict
blaster = blast(blastDataDir=opts['data-dir'], blastAppDir=opts['app-dir'])
#Pyro.config.PYRO_NS_HOSTNAME='136.142.141.113'
#Pyro.config.PYRO_NS_HOSTNAME='134.126.95.56'
if opts['nsname']:
Pyro.config.PYRO_NS_HOSTNAME=opts['nsname']
else:
Pyro.config.PYRO_NS_HOSTNAME='localhost'
print 'trying to get serverSelector...'
serverSelector = Pyro.core.getProxyForURI("PYRONAME://serverSelector")
print 'got serverSelector'
blaster.client = socket.gethostname()
print sys.platform, blaster.client
blaster.server = serverSelector.get_server(sys.platform, blaster.client)
print 'using server', blaster.server
blaster.phamServer = Pyro.core.getProxyForURI("PYRONAME://"+blaster.server)
'''Retrieves sequences to BLAST, BLASTs, and reports scores infinitely'''
while 1:
try:
print 'getting sequences to align'
if blaster.get_blastWorkUnit(opts['data-dir']):
print 'aligning sequences'
results = blaster.blast()
print 'results:', results
blaster.phamServer.report_scores(blaster.blastWorkUnit, results, blaster.client)
else:
print 'no work units available...sleeping'
time.sleep(30)
except KeyboardInterrupt:
blaster.phamServer.disconnect(blaster.client)
print 'exiting cleanly'
sys.exit()
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except Exception, x:
        # Pyro wraps remote errors; print the remote traceback so failures
        # that happened on the server side are visible on this client.
        print ''.join(Pyro.util.getPyroTraceback(x))
        print 'exiting on pyro traceback'
        sys.exit()
| byuphamerator/phamerator-dev | phamerator/blastclient.py | Python | gpl-2.0 | 7,478 | [
"BLAST"
] | 474c7a4d10216421c5b938cf7bff3f54413145b0418f4b2cb9db1242fa9d1aa1 |
# Text-format map: PDB ID -> comma-separated BMRB IDs.  The inner UNION
# merges curated ('exact'), author-supplied and BLAST-derived links; the
# %s placeholder filters on link_type (pass '%' to accept all types).
pdb_bmrb_map_text = """
SELECT UPPER(pdb_id) || ' ' || string_agg(bmrb_id, ',' ORDER BY bmrb_id) AS string
FROM (SELECT pdb_id, bmrb_id, 'exact' AS link_type, null AS comment
FROM web.pdb_link
UNION
SELECT UPPER("Database_accession_code"), "Entry_ID", 'author', "Relationship"
FROM macromolecules."Related_entries"
WHERE "Database_name" = 'PDB' AND "Relationship" != 'BMRB Entry Tracking System' AND "Relationship" != 'BMRB Tracking System'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'author', "Entry_details"
FROM macromolecules."Entity_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" = 'yes'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'author', "Entry_details"
FROM macromolecules."Assembly_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" = 'yes'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'blast', "Entry_details"
FROM macromolecules."Entity_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" != 'yes') AS sub
WHERE link_type like %s AND pdb_id IS NOT NULL
GROUP BY UPPER(pdb_id)
ORDER BY UPPER(pdb_id);
"""
# JSON-format map: PDB ID -> array of BMRB IDs.  Same UNION of link sources
# as pdb_bmrb_map_text; the %s placeholder filters on link_type.
# Bug fix: the literal 'BMRB Tracking System' was wrapped across two source
# lines, so the string contained a newline and the exclusion never matched;
# it is now on one line, matching the three sibling queries.
pdb_bmrb_map_json = """
SELECT UPPER(pdb_id) as pdb_id, array_agg(bmrb_id ORDER BY bmrb_id::int) AS bmrb_ids
FROM (SELECT pdb_id, bmrb_id, 'exact' AS link_type, null AS comment
FROM web.pdb_link
UNION
SELECT UPPER("Database_accession_code"), "Entry_ID", 'author', "Relationship"
FROM macromolecules."Related_entries"
WHERE "Database_name" = 'PDB' AND "Relationship" != 'BMRB Entry Tracking System' AND "Relationship" != 'BMRB Tracking System'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'author', "Entry_details"
FROM macromolecules."Entity_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" = 'yes'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'author', "Entry_details"
FROM macromolecules."Assembly_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" = 'yes'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'blast', "Entry_details"
FROM macromolecules."Entity_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" != 'yes') AS sub
WHERE link_type like %s AND pdb_id IS NOT NULL
GROUP BY UPPER(pdb_id)
ORDER BY UPPER(pdb_id);
"""
# Text-format map: BMRB ID -> comma-separated PDB IDs (inverse direction of
# pdb_bmrb_map_text; same UNION of link sources, %s filters on link_type).
bmrb_pdb_map_text = """
SELECT bmrb_id || ' ' || string_agg(pdb_id, ',' ORDER BY pdb_id) AS string
FROM (SELECT pdb_id, bmrb_id, 'exact' AS link_type, null AS comment
FROM web.pdb_link
UNION
SELECT UPPER("Database_accession_code"), "Entry_ID", 'author', "Relationship"
FROM macromolecules."Related_entries"
WHERE "Database_name" = 'PDB' AND "Relationship" != 'BMRB Entry Tracking System' AND "Relationship" != 'BMRB Tracking System'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'author', "Entry_details"
FROM macromolecules."Entity_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" = 'yes'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'author', "Entry_details"
FROM macromolecules."Assembly_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" = 'yes'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'blast', "Entry_details"
FROM macromolecules."Entity_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" != 'yes') AS sub
WHERE link_type like %s AND pdb_id IS NOT NULL
GROUP BY bmrb_id
ORDER BY bmrb_id::int;"""
# JSON-format map: BMRB ID -> array of PDB IDs.
bmrb_pdb_map_json = """
SELECT bmrb_id, array_agg(pdb_id ORDER BY pdb_id) AS pdb_ids
FROM (SELECT pdb_id, bmrb_id, 'exact' AS link_type, null AS comment
FROM web.pdb_link
UNION
SELECT UPPER("Database_accession_code"), "Entry_ID", 'author', "Relationship"
FROM macromolecules."Related_entries"
WHERE "Database_name" = 'PDB' AND "Relationship" != 'BMRB Entry Tracking System' AND "Relationship" != 'BMRB Tracking System'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'author', "Entry_details"
FROM macromolecules."Entity_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" = 'yes'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'author', "Entry_details"
FROM macromolecules."Assembly_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" = 'yes'
UNION
SELECT UPPER("Accession_code"), "Entry_ID", 'blast', "Entry_details"
FROM macromolecules."Entity_db_link"
WHERE "Database_code" = 'PDB' AND "Author_supplied" != 'yes') AS sub
WHERE link_type like %s AND pdb_id IS NOT NULL
GROUP BY bmrb_id
ORDER BY bmrb_id::int;"""
# JSON-format map: BMRB ID -> array of UniProt IDs (from the precomputed
# web.uniprot_mappings table; %s filters on link_type).
bmrb_uniprot_map_json = """
SELECT bmrb_id, array_agg(uniprot_id) AS uniprot_ids
FROM web.uniprot_mappings
WHERE link_type like %s
GROUP BY bmrb_id
ORDER BY bmrb_id
"""
# Text-format map: BMRB ID -> comma-separated UniProt IDs.
bmrb_uniprot_map_text = """
SELECT bmrb_id || ' ' || string_agg(uniprot_id, ',' ORDER BY uniprot_id) AS string
FROM web.uniprot_mappings
WHERE link_type like %s
GROUP BY bmrb_id
ORDER BY bmrb_id"""
# JSON-format map: UniProt ID -> array of BMRB IDs.
uniprot_bmrb_map_json = """
SELECT uniprot_id, array_agg(bmrb_id) as bmrb_ids
FROM web.uniprot_mappings
WHERE link_type like %s
GROUP BY uniprot_id
ORDER BY uniprot_id
"""
# Text-format map: UniProt ID -> comma-separated BMRB IDs.
uniprot_bmrb_map_text = """
SELECT uniprot_id || ' ' || string_agg(bmrb_id, ',' ORDER BY bmrb_id) AS string
FROM web.uniprot_mappings
WHERE link_type like %s
GROUP BY uniprot_id
ORDER BY uniprot_id
"""
# Plain list of all distinct UniProt IDs that have any BMRB mapping.
uniprot_uniprot_map = """
SELECT DISTINCT(uniprot_id) AS string
FROM web.uniprot_mappings
GROUP BY uniprot_id
ORDER BY uniprot_id"""
| uwbmrb/BMRB-API | server/wsgi/bmrbapi/views/sql/db_links.py | Python | gpl-3.0 | 5,526 | [
"BLAST"
] | 23245aa4fa3351aa4d20db93c5b9554ce2d118ba19afbc7ccbc4eed263da7f5b |
"""Pipeline utilities to retrieve FASTQ formatted files for processing.
"""
import os
import shutil
from bcbio import bam, broad, utils
from bcbio.bam import fastq
from bcbio.distributed import objectstore
from bcbio.pipeline import alignment
from bcbio.pipeline import datadict as dd
from bcbio.utils import file_exists, safe_makedir, splitext_plus
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
from bcbio.ngsalign import alignprep
def get_fastq_files(data):
    """Retrieve fastq files for the given lane, ready to process.

    Returns a list of local or remote fastq paths: BAM inputs are converted
    to fastq when the aligner needs it, non-standard quality encodings are
    groomed, and local files are gzipped unless the aligner is bowtie.
    """
    assert "files" in data, "Did not find `files` in input; nothing to process"
    ready_files = []
    should_gzip = True

    # Bowtie does not accept gzipped fastq
    if 'bowtie' in data['reference'].keys():
        should_gzip = False
    for fname in data["files"]:
        if fname.endswith(".bam"):
            if _pipeline_needs_fastq(data["config"], data):
                # NOTE(review): this replaces (not extends) ready_files --
                # assumes a BAM input is the only file in the lane; confirm.
                ready_files = convert_bam_to_fastq(fname, data["dirs"]["work"],
                                                   data, data["dirs"], data["config"])
            else:
                ready_files = [fname]
        elif objectstore.is_remote(fname):
            # Remote inputs are passed through untouched; downstream steps
            # handle retrieval.
            ready_files.append(fname)
        # Trimming does quality conversion, so if not doing that, do an explicit conversion
        elif not(dd.get_trim_reads(data)) and dd.get_quality_format(data) != "standard":
            out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "fastq_convert"))
            ready_files.append(fastq.groom(fname, data, out_dir=out_dir))
        else:
            ready_files.append(fname)
    # Drop any None placeholders (e.g. missing mate files).
    ready_files = [x for x in ready_files if x is not None]
    if should_gzip:
        out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "fastq"))
        ready_files = [_gzip_fastq(x, out_dir) for x in ready_files]
    for in_file in ready_files:
        if not objectstore.is_remote(in_file):
            assert os.path.exists(in_file), "%s does not exist." % in_file
    return ready_files
def _gzip_fastq(in_file, out_dir=None):
    """Return a gzipped version of ``in_file`` when it is a local fastq file.

    bzip2-compressed inputs are recoded to gzip; already-gzipped or remote
    inputs are returned unchanged.  The output goes next to the input, or
    into ``out_dir`` when given.
    """
    # Only local fastq files are candidates for compression.
    if not fastq.is_fastq(in_file) or objectstore.is_remote(in_file):
        return in_file
    if utils.is_bzipped(in_file):
        return _bzip_gzip(in_file, out_dir)
    if utils.is_gzipped(in_file):
        return in_file
    if out_dir:
        gzipped_file = os.path.join(out_dir, os.path.basename(in_file) + ".gz")
    else:
        gzipped_file = in_file + ".gz"
    if file_exists(gzipped_file):
        return gzipped_file
    message = "gzipping {in_file} to {gzipped_file}.".format(
        in_file=in_file, gzipped_file=gzipped_file)
    with file_transaction(gzipped_file) as tx_gzipped_file:
        do.run("gzip -c {in_file} > {tx_gzipped_file}".format(**locals()),
               message)
    return gzipped_file
def _bzip_gzip(in_file, out_dir=None):
    """Convert a bzip2-compressed fastq file to gzip compression.

    Non-bzipped or remote/non-fastq inputs are returned unchanged.  The
    converted file is written next to the input, or into ``out_dir`` when
    given.
    """
    if not utils.is_bzipped(in_file):
        return in_file
    # Strip the .bz2 extension; the fastq check below is done on the
    # stripped name.
    base, _ = os.path.splitext(in_file)
    if out_dir:
        gzipped_file = os.path.join(out_dir, os.path.basename(base) + ".gz")
    else:
        gzipped_file = base + ".gz"
    if (fastq.is_fastq(base) and not objectstore.is_remote(in_file)):
        if file_exists(gzipped_file):
            return gzipped_file
        message = "gzipping {in_file} to {gzipped_file}.".format(
            in_file=in_file, gzipped_file=gzipped_file)
        # Recompress via a pipe inside a transaction so a partial output
        # never lands at the final path.
        with file_transaction(gzipped_file) as tx_gzipped_file:
            do.run("bunzip2 -c {in_file} | gzip > {tx_gzipped_file}".format(**locals()), message)
        return gzipped_file
    return in_file
def _pipeline_needs_fastq(config, data):
    """Determine if the pipeline can proceed with a BAM file, or needs fastq conversion.

    Returns True when an aligner is configured and it cannot consume BAM
    input directly.
    """
    aligner = config["algorithm"].get("aligner")
    support_bam = aligner in alignment.metadata.get("support_bam", [])
    # Bug fix: wrap in bool() so the predicate returns False (not None/'')
    # when no aligner is configured.
    return bool(aligner) and not support_bam
def convert_bam_to_fastq(in_file, work_dir, data, dirs, config):
    """Convert BAM input file into FASTQ files.

    Thin wrapper around ``alignprep.prep_fastq_inputs``.  The ``work_dir``,
    ``dirs`` and ``config`` arguments are kept for interface compatibility
    but are unused.
    """
    return alignprep.prep_fastq_inputs([in_file], data)
def merge(files, out_file, config):
    """Merge fastq files, recognizing paired-end inputs.

    ``files`` is a list of tuples of fastq paths.  Single-end inputs are
    merged into ``out_file``; paired-end inputs are merged per mate into
    ``<out>_R1``/``<out>_R2`` files, whose paths are returned as a list.
    """
    first_reads = [fq[0] for fq in files]
    if len(files[0]) == 1:
        return _merge_list_fastqs(first_reads, out_file, config)
    # Paired-end: merge each mate into its own _R1/_R2 output file.
    base, ext = splitext_plus(out_file)
    second_reads = [fq[1] for fq in files]
    pair1_out_file = base + "_R1" + ext
    pair2_out_file = base + "_R2" + ext
    _merge_list_fastqs(first_reads, pair1_out_file, config)
    _merge_list_fastqs(second_reads, pair2_out_file, config)
    return [pair1_out_file, pair2_out_file]
def _merge_list_fastqs(files, out_file, config):
    """Merge a list of fastq files into one.

    Inputs are gzipped first so they can be concatenated directly.  A single
    input is moved (when ``remove_source`` is set in ``config``) or
    symlinked instead of copied.  Returns the merged file path.

    Raises ValueError if any input is not a fastq file.
    """
    if not all(map(fastq.is_fastq, files)):
        raise ValueError("Not all of the files to merge are fastq files: %s " % (files))
    assert all(map(utils.file_exists, files)), ("Not all of the files to merge "
                                                "exist: %s" % (files))
    if not file_exists(out_file):
        files = [_gzip_fastq(fn) for fn in files]
        if len(files) == 1:
            # Idiom fix: single dict lookup instead of `in` + subscript.
            if config.get("remove_source"):
                shutil.move(files[0], out_file)
            else:
                # Bug fix: link to the absolute path so the symlink stays
                # valid when out_file is in a different directory than the
                # (possibly relative) input path.
                os.symlink(os.path.abspath(files[0]), out_file)
            return out_file
        with file_transaction(out_file) as file_txt_out:
            files_str = " ".join(files)
            cmd = "cat {files_str} > {file_txt_out}".format(**locals())
            do.run(cmd, "merge fastq files %s" % files)
    return out_file
| a113n/bcbio-nextgen | bcbio/pipeline/fastq.py | Python | mit | 5,886 | [
"Bowtie"
] | 1d25bcd607edd20d0466bfdf8589e771fe632d0fb0af7b1e1f749ad89bee20ff |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Rnaquast(Package):
    """Quality assessment of de novo transcriptome assemblies from RNA-Seq data

    rnaQUAST is a tool for evaluating RNA-Seq assemblies using reference genome
    and gene database. In addition, rnaQUAST is also capable of estimating gene
    database coverage by raw reads and de novo quality assessment
    using third-party software."""

    homepage = "https://github.com/ablab/rnaquast"
    url = "https://github.com/ablab/rnaquast/archive/refs/tags/v2.2.0.tar.gz"

    maintainers = ['dorton21']

    version('2.2.0', sha256='117dff9d9c382ba74b7b0ff24bc7b95b9ca6aa701ebf8afd22943aa54e382334')

    depends_on('python@2.5:', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-joblib', type=('build', 'run'))
    depends_on('py-gffutils', type=('build', 'run'))
    depends_on('gmap-gsnap', type=('build', 'run'))
    depends_on('blast-plus', type=('build', 'run'))

    def install(self, spec, prefix):
        """Copy the source tree into the prefix and expose the entry point."""
        install_tree('.', prefix.bin)
        # Drop the .py suffix so the tool is invoked as plain `rnaQUAST`.
        os.rename('%s/rnaQUAST.py' % prefix.bin, '%s/rnaQUAST' % prefix.bin)

    def setup_run_environment(self, env):
        """Prepend this package's bin directory to PATH at run time."""
        # Bug fix: `prefix` is not defined in this method's scope (it is only
        # an argument to install()); use self.prefix as Spack documents.
        env.prepend_path('PATH', self.prefix.bin)
| LLNL/spack | var/spack/repos/builtin/packages/rnaquast/package.py | Python | lgpl-2.1 | 1,420 | [
"BLAST"
] | 1382a6e9feaf953131a809efb8b4df5c2e1c3438a06c46023fb72596b7427e64 |
'''
sbclearn (c) University of Manchester 2018
sbclearn is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
# pylint: disable=wrong-import-order
import itertools
import sys
from keras.optimizers import Adam
from sklearn.ensemble import ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score, train_test_split, \
GridSearchCV
from sklearn.preprocessing.data import StandardScaler
from sklearn.svm import SVR
from sklearn.tree.tree import DecisionTreeRegressor
from gg_learn.keras import regress_lstm
from gg_learn.nicole import get_aligned_data, get_data
from gg_learn.utils import transformer
from gg_learn.utils.biochem_utils import get_ordinal_seq, \
get_ordinal_seq_padded
import numpy as np
# from sklearn.ensemble.forest import RandomForestRegressor
# from sklearn.linear_model import LinearRegression
# from sklearn.tree.tree import DecisionTreeRegressor
def analyse_padded(df, layer_units=None, dropout=0.5,
                   lr=0.00025, batch_size=10, epochs=50):
    '''Analyse padded, ordinal-encoded sequences with an LSTM regressor.

    df: DataFrame with 'seq' and 'geraniol' columns; missing geraniol
    values are treated as 0.  The remaining keyword arguments are passed
    through to regress_lstm / Adam.  Prints the RMSE score.
    '''
    X = get_ordinal_seq_padded(df['seq'])
    y = df['geraniol'].fillna(0)

    # Default architecture: three stacked 64-unit LSTM layers.
    if layer_units is None:
        layer_units = [64, 64, 64]

    score = regress_lstm(X, y,
                         layer_units=layer_units, dropout=dropout,
                         optimizer=Adam(lr=lr),
                         batch_size=batch_size, epochs=epochs)

    print('Score: %.2f RMSE' % (score))
def analyse_unpadded(df):
    '''Analyse unpadded, ordinal-encoded sequences with an LSTM regressor.

    Prints the RMSE score; missing geraniol values are treated as 0.
    '''
    targets = df['geraniol'].fillna(0)
    features = get_ordinal_seq(df['seq'])
    rmse = regress_lstm(features, targets, optimizer=Adam(lr=0.00025),
                        batch_size=1, epochs=50)
    print('Score: %.2f RMSE' % (rmse))
def analyse_aligned(df):
    '''Analyse aligned data.

    Screens several (encoding, regressor) combinations, then grid-searches
    and fits a random forest on amino-acid-encoded features.
    '''
    aligned_data = get_aligned_data(df)
    _hi_level_investigation(aligned_data)

    encoded = transformer.AminoAcidTransformer().transform(aligned_data)
    # Column 1 is the target and columns 2+ the features; column 0 is
    # skipped (presumably an identifier -- confirm against the transformer).
    X, y = encoded[:, 2:], encoded[:, 1]
    X = StandardScaler().fit_transform(X)

    # _grid_search_extra_trees(X, y, cv)
    # _grid_search_svr(X, y, cv)
    _grid_search_random_forest(X, y, cv=10)
    _predict(RandomForestRegressor(), X, y)
def do_grid_search(estimator, X, y, cv, param_grid=None, verbose=False):
    '''Run a cross-validated grid search and print RMSE per parameter set.

    Results are printed best-first; scores come back from sklearn as
    negated MSE and are converted to RMSE for display.
    '''
    grid_search = GridSearchCV(estimator,
                               param_grid if param_grid else {},
                               scoring='neg_mean_squared_error',
                               cv=cv,
                               verbose=verbose)
    grid_search.fit(X, y)

    results = grid_search.cv_results_
    ranked = sorted(zip(results['mean_test_score'], results['params']),
                    reverse=True)
    for mean_score, params in ranked:
        print(np.sqrt(-mean_score), params)
    print()
def _hi_level_investigation(data):
    '''Perform high-level investigation.

    Cross-validates every (encoding, regressor) combination and prints the
    RMSE mean and standard deviation for each pairing.
    '''
    transformers = [
        transformer.OneHotTransformer(nucl=False),
        transformer.AminoAcidTransformer()]

    estimators = [
        LinearRegression(),
        DecisionTreeRegressor(),
        RandomForestRegressor(),
        ExtraTreesRegressor(),
        GradientBoostingRegressor(),
        SVR(kernel='poly')
    ]

    cv = 10

    for trnsfrmr, estimator in itertools.product(transformers, estimators):
        encoded = trnsfrmr.transform(data)
        # Column 1 is the target and columns 2+ the features; column 0 is
        # skipped (presumably an identifier -- confirm).
        X, y = encoded[:, 2:], encoded[:, 1]
        X = StandardScaler().fit_transform(X)

        # sklearn returns negated MSE; convert to RMSE for reporting.
        scores = cross_val_score(estimator,
                                 X, y,
                                 scoring='neg_mean_squared_error',
                                 cv=cv,
                                 verbose=False)

        scores = np.sqrt(-scores)

        print('\t'.join([trnsfrmr.__class__.__name__,
                         estimator.__class__.__name__,
                         str((scores.mean(), scores.std()))]))
    print()
def _grid_search_random_forest(X, y, cv):
    '''Grid search over RandomForestRegressor hyperparameters.'''
    grid = {
        'max_depth': [None, 1, 2, 5],
        'max_leaf_nodes': [None, 2, 5],
        'n_estimators': [10, 20, 50],
    }
    do_grid_search(RandomForestRegressor(), X, y, cv, grid)
def _grid_search_svr(X, y, cv):
    '''Grid search over SVR hyperparameters.'''
    def log_range(lo, hi):
        # Powers of ten from 10**lo up to (but excluding) 10**hi.
        return [1 * 10 ** n for n in range(lo, hi)]

    grid = {'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
            'degree': range(1, 4),
            'epsilon': log_range(-1, 1),
            'gamma': ['auto'] + log_range(-1, 1),
            'coef0': log_range(-4, 1),
            'tol': log_range(-4, 1),
            'C': log_range(-1, 1)}
    do_grid_search(SVR(kernel='poly'), X, y, cv, grid)
def _predict(estimator, X, y, tests=25, test_size=0.05):
    '''Repeatedly fit on random train/test splits and collect predictions.

    Bug fix: the collected observed/predicted values were previously built
    up and then discarded; they are now returned as (y_tests, y_preds) so
    callers can score or plot them.  (Previously the function returned
    None, which no caller used, so this is backward-compatible.)
    '''
    y_tests = []
    y_preds = []

    for _ in range(tests):
        X_train, X_test, y_train, y_test = \
            train_test_split(X, y, test_size=test_size)
        estimator.fit(X_train, y_train)
        y_tests.extend(y_test)
        y_preds.extend(estimator.predict(X_test))

    return y_tests, y_preds
def main(args):
    '''main method.

    args: [data_file, optional filter args...]; forwarded to get_data.
    Writes the loaded data to geraniol.csv and runs the padded analysis.
    '''
    df = get_data(args[0], args[1:] if len(args) > 1 else None)
    df.to_csv('geraniol.csv')
    analyse_padded(df, batch_size=25, epochs=3)
    # analyse_aligned(df)


if __name__ == '__main__':
    main(sys.argv[1:])
| synbiochem/synbiochem-learn | gg_learn/nicole/nicole.py | Python | mit | 5,781 | [
"VisIt"
] | 0019891aaf8b0e3a3d3706ee272d388ad07f97c4229fe2c1c7c8ced798f28902 |
#!/usr/bin/env python3
"""
Unit tests for MetSim
"""
import os
import shutil
import tempfile
from collections import OrderedDict
from datetime import datetime

import numpy as np
import pandas as pd
import pytest
import xarray as xr

import metsim.cli.ms as cli
import metsim.metsim
from metsim import io
from metsim.metsim import MetSim
class DummyOpts:
    """Minimal stand-in for the parsed CLI options that io.read_config expects.

    Only the configuration path varies per test; the remaining option
    values are fixed to the defaults used throughout this suite.
    """

    def __init__(self, config):
        # Path to the MetSim configuration file under test.
        self.config = config
        self.verbose = False
        self.num_workers = 1
        self.scheduler = 'threading'
# Parameters to test over
in_fmts = ['ascii', 'binary', 'netcdf']
methods = ['mtclim']
timesteps = [1440, 30]

# Where datasets for each input type are found
data_locations = {'netcdf': './metsim/data/test.nc',
                  'ascii': './metsim/data/ascii/',
                  'binary': './metsim/data/binary/'}

# Domain files to use
domain_files = {'netcdf': './metsim/data/domain.nc',
                'ascii': './metsim/data/stehekin.nc',
                'binary': './metsim/data/stehekin.nc'}

# State files to use
state_files = {'netcdf': './metsim/data/state_nc.nc',
               'ascii': './metsim/data/state_vic.nc',
               'binary': './metsim/data/state_vic.nc'}

# Dates to run over: (start, stop) per input format.
# Bug fix: use datetime.datetime directly; the ``pd.datetime`` alias was
# deprecated in pandas 0.25 and removed in pandas 1.0.
dates = {'netcdf': (datetime(1950, 1, 1), datetime(1950, 1, 31)),
         'binary': (datetime(1949, 1, 1), datetime(1949, 12, 31)),
         'ascii': (datetime(1949, 1, 1), datetime(1949, 12, 31))}

# Domain vars
domain_section = {'netcdf': OrderedDict(lat='lat', lon='lon', mask='mask',
                                        elev='elev'),
                  'binary': OrderedDict(lat='lat', lon='lon', mask='mask',
                                        elev='elev'),
                  'ascii': OrderedDict(lat='lat', lon='lon', mask='mask',
                                       elev='elev')}

# Input vars
in_vars_section = {'netcdf': OrderedDict(Prec='prec', Tmax='t_max',
                                         Tmin='t_min', wind='wind'),
                   'binary': OrderedDict([('prec', '40.0 unsigned'),
                                          ('t_max', '100.0 signed'),
                                          ('t_min', '100.0 signed'),
                                          ('wind', '100.0 signed')]),
                   'ascii': OrderedDict([('prec', 'prec'),
                                         ('t_max', 't_max'),
                                         ('t_min', 't_min'),
                                         ('wind', 'wind')])}

# All values should be in these ranges
data_ranges = {'temp': (-50, 40),
               'prec': (0, 8),
               'shortwave': (0, 1000),
               'longwave': (0, 450),
               'wind': (0, 10),
               'vapor_pressure': (0, 2),
               'air_pressure': (0, 101325),
               'spec_humid': (0, 2),
               'rel_humid': (0, 100)}
@pytest.fixture(params=in_fmts)
def in_format(request):
    """Input formats - see `in_fmts`"""
    return request.param


@pytest.fixture(params=methods)
def method(request):
    """Generation methods - see `methods`"""
    return request.param


@pytest.fixture(params=timesteps)
def timestep(request):
    """Output timesteps in minutes - see `timesteps`"""
    return request.param


@pytest.fixture()
def domain_file():
    """Domain file containing elevation data"""
    return "./tests/data/domain.nc"
@pytest.fixture()
def test_params(in_format, method, timestep):
    """Assemble the parameters for each combo

    Builds a MetSim parameter dict for the parametrized (format, method,
    timestep) combination, writing output into a fresh temp directory.
    """
    start = dates[in_format][0]
    stop = dates[in_format][1]
    in_vars = in_vars_section[in_format]
    domain_vars = domain_section[in_format]
    out_dir = tempfile.mkdtemp('results')
    out_prefix = "forcing"
    params = {'start': start,
              'stop': stop,
              'in_vars': in_vars,
              'forcing_fmt': in_format,
              'domain_fmt': 'netcdf',
              'state_fmt': 'netcdf',
              'domain': domain_files[in_format],
              'state': state_files[in_format],
              'method': method,
              # NOTE(review): 'calender' (sic) -- presumably the key MetSim
              # expects; confirm before renaming.
              'calender': 'standard',
              'time_step': timestep,
              'time_grouper': None,
              'out_dir': out_dir,
              'out_state': os.path.join(out_dir, 'state.nc'),
              'out_prefix': out_prefix,
              'forcing_vars': in_vars,
              'domain_vars': domain_vars}
    return params
@pytest.fixture()
def test_setup(test_params, domain_file):
    """Tests the setup of the MetSim object

    Resolves the forcing files for the parametrized input format, sanity
    checks the fixture data on disk, and returns a constructed MetSim.
    """
    in_fmt = test_params['forcing_fmt']
    loc = data_locations[in_fmt]

    # Get the files and make sure the right amount exist
    if in_fmt == 'binary' or in_fmt == 'ascii':
        data_files = [os.path.join(loc, f) for f in os.listdir(loc)]
        # The ascii/binary fixture directories each ship 16 cell files.
        assert len(data_files) == 16
    else:
        data_files = loc
        assert data_files == './metsim/data/test.nc'
    test_params['forcing'] = data_files

    # Test construction
    ms = MetSim(test_params)
    return ms
#def test_mtclim(test_setup):
# """Tests the ability to run successfully"""
# # Here we only test a single grid cell
# daily_out_vars = ['prec', 't_max', 't_min', 'wind', 'shortwave',
# 'tskc', 'pet', 'vapor_pressure']
# hourly_out_vars = ['prec', 'temp', 'shortwave', 'longwave',
# 'vapor_pressure', 'wind', 'rel_humid', 'spec_humid',
# 'air_pressure']
#
# # Load data and ensure the ready flag has been set
# test_setup.params['time_step'] = 1440
# test_setup.params['out_vars'] = daily_out_vars
#
# # Check to see that the data is valid
# assert isinstance(test_setup.met_data, xr.Dataset)
# n_days = len(test_setup.met_data.time)
#
# # Run the forcing generation, but not the disaggregation
# test_setup.run()
# daily = test_setup.open_output()
# print(daily)
# assert isinstance(daily, xr.Dataset)
# assert len(daily.time) == n_days
# for var in daily_out_vars:
# assert var in daily
#
# # Now test the disaggregation as well as forcing generation
# test_setup.params['time_step'] = 60
# test_setup.params['out_vars'] = hourly_out_vars
#
# # Check to see that the data is valid
# assert isinstance(test_setup.met_data, xr.Dataset)
#
# test_setup.run()
# hourly = test_setup.open_output().isel(lat=2, lon=2).to_dataframe()
# assert len(hourly) == (n_days * const.HOURS_PER_DAY)
# for var in test_setup.params['out_vars']:
# assert var in hourly
# l, h = data_ranges[var]
# vl = min(hourly[var].values)
# vh = max(hourly[var].values)
# print(var, vl, vh, l, h)
# assert hourly[var].between(l, h).all()
#
# # Now test sub-hourly disaggregation
# test_setup.params['time_step'] = 30
# test_setup.run()
# half_hourly = test_setup.open_output().isel(lat=1, lon=3).to_dataframe()
# assert len(half_hourly) == (2 * n_days * const.HOURS_PER_DAY)
def test_time_offset():
    """Tests to make sure that the time_offset option works

    With ``period_ending`` the timestamps label the end of each interval,
    so the run's times should be shifted by exactly one step relative to
    the default period-beginning convention.
    """
    loc = data_locations['binary']
    data_files = [os.path.join(loc, f) for f in os.listdir(loc)]
    out_vars = ['prec', 'temp', 'shortwave', 'longwave', 'vapor_pressure',
                'wind', 'rel_humid', 'spec_humid', 'air_pressure']
    out_dir = '.'
    params = {'start': dates['binary'][0],
              'stop': dates['binary'][1],
              'forcing_fmt': 'binary',
              'domain_fmt': 'netcdf',
              'state_fmt': 'netcdf',
              'domain': './metsim/data/stehekin.nc',
              'state': './metsim/data/state_vic.nc',
              'forcing': data_files,
              'method': 'mtclim',
              'scheduler': 'threading',
              'time_step': "60",
              'out_dir': out_dir,
              'out_state': os.path.join(out_dir, 'state.nc'),
              'out_vars': {n: metsim.metsim.available_outputs[n]
                           for n in out_vars},
              'forcing_vars': in_vars_section['binary'],
              'domain_vars': domain_section['binary']
              }
    # Same configuration twice, differing only in period_ending.
    params1 = dict()
    params1.update(params)
    params2 = dict()
    params2.update(params)
    params1['period_ending'] = False
    params2['period_ending'] = True

    # Set up the MetSim object
    ms1 = MetSim(params1)
    ms2 = MetSim(params2)

    # Period-ending times should lag period-beginning times by one step.
    assert ms1._times[1:] == ms2._times[:-1]
def test_variable_rename():
    """Tests to make sure that variable renaming works

    Output variables configured with an ``out_name`` should appear in the
    output dataset under that name.
    """
    loc = data_locations['binary']
    data_files = [os.path.join(loc, f) for f in os.listdir(loc)]
    out_dir = '.'
    params = {'start': dates['binary'][0],
              'stop': dates['binary'][1],
              'forcing_fmt': 'binary',
              'domain_fmt': 'netcdf',
              'state_fmt': 'netcdf',
              'domain': './metsim/data/stehekin.nc',
              'state': './metsim/data/state_vic.nc',
              'forcing': data_files,
              'method': 'mtclim',
              'scheduler': 'threading',
              'time_step': "60",
              'out_dir': out_dir,
              'out_state': os.path.join(out_dir, 'state.nc'),
              # Rename prec -> pptrate and shortwave -> SWRadAtm.
              'out_vars': {
                  'prec': {'out_name': 'pptrate'},
                  'shortwave': {'out_name': 'SWRadAtm'}},
              'forcing_vars': in_vars_section['binary'],
              'domain_vars': domain_section['binary']
              }
    ms = MetSim(params)
    ms.run()
    ds = ms.open_output()
    assert 'pptrate' in ds.variables
    assert 'SWRadAtm' in ds.variables
def test_unit_conversion():
    """Tests to make sure that output unit conversion works

    Runs the same configuration with two unit choices (mm s-1 vs
    mm timestep-1 for precipitation; K vs C for temperature) and checks
    the outputs agree after converting.
    """
    loc = data_locations['binary']
    data_files = [os.path.join(loc, f) for f in os.listdir(loc)]
    out_dir = '.'
    params = {'start': dates['binary'][0],
              'stop': dates['binary'][1],
              'forcing_fmt': 'binary',
              'domain_fmt': 'netcdf',
              'state_fmt': 'netcdf',
              'domain': './metsim/data/stehekin.nc',
              'state': './metsim/data/state_vic.nc',
              'forcing': data_files,
              'method': 'mtclim',
              'scheduler': 'threading',
              'time_step': "60",
              'out_dir': out_dir,
              'out_state': os.path.join(out_dir, 'state.nc'),
              'out_vars': {
                  'prec': {'out_name': 'pptrate',
                           'units': 'mm s-1'},
                  'temp': {'out_name': 'airtemp',
                           'units': 'K'}},
              'forcing_vars': in_vars_section['binary'],
              'domain_vars': domain_section['binary']}
    params1 = dict()
    params1.update(params)
    params2 = dict()
    params2.update(params)
    # Second run: per-timestep precipitation and Celsius temperature.
    params2['out_vars'] = {
        'prec': {'out_name': 'pptrate',
                 'units': 'mm timestep-1'},
        'temp': {'out_name': 'airtemp',
                 'units': 'C'}}

    ms1 = MetSim(params1)
    ms1.run()
    ds1 = ms1.open_output().load()
    ds1.close()

    time_step = int(params['time_step'])
    sec_per_min = 60.
    tol = 1e-4

    ms2 = MetSim(params2)
    ms2.run()
    # NOTE(review): ds2 is never closed, unlike ds1 -- confirm intended.
    ds2 = ms2.open_output().load()
    # K = C + 273.15
    assert np.allclose(ds1['airtemp'].mean(),
                       ds2['airtemp'].mean()+273.15, atol=tol)
    # mm s-1 * seconds-per-timestep = mm timestep-1
    assert np.allclose(time_step * sec_per_min * ds1['pptrate'].mean(),
                       ds2['pptrate'].mean(), atol=tol)
def test_disaggregation_values():
    """Tests to make sure values are being generated correctly

    Compares hourly and 3-hourly disaggregated output for a single grid
    cell against previously validated reference files, using a normalized
    RMSE tolerance per variable.
    """
    # Set parameters
    loc = data_locations['binary']
    data_files = [os.path.join(loc, f) for f in os.listdir(loc)]
    out_vars = ['prec', 'temp', 'shortwave', 'longwave', 'vapor_pressure',
                'wind', 'rel_humid', 'spec_humid', 'air_pressure']
    out_dir = tempfile.mkdtemp('results')
    params = {'start': dates['binary'][0],
              'stop': dates['binary'][1],
              'forcing_fmt': 'binary',
              'domain_fmt': 'netcdf',
              'state_fmt': 'netcdf',
              'domain': './metsim/data/stehekin.nc',
              'state': './metsim/data/state_vic.nc',
              'forcing': data_files,
              'method': 'mtclim',
              'scheduler': 'threading',
              'time_step': "60",
              'out_dir': out_dir,
              'out_state': os.path.join(out_dir, 'state.nc'),
              'out_vars': {n: metsim.metsim.available_outputs[n]
                           for n in out_vars},
              'forcing_vars': in_vars_section['binary'],
              'domain_vars': domain_section['binary']
              }

    # The (lat, lon) index of the grid cell we test against
    loc = (1, 4)

    def check_data(out, good, tol=0.03):
        # Normalized RMSE against the reference data must stay below tol
        # for every output variable.
        assert isinstance(out, pd.DataFrame)
        for var in ms.params['out_vars'].keys():
            # Check to make sure each variable has normalized
            # rmse of less than 0.02
            h = max([good[var].max(), out[var].max()])
            l = min([good[var].min(), out[var].min()])
            nrmse = np.sqrt((good[var] - out[var]).pow(2).mean()) / (h - l)
            print(var, nrmse)
            assert nrmse < tol

    # Set up the MetSim object
    ms = MetSim(params)

    # Run MetSim and load in the validated data
    ms.run()
    ds = ms.open_output()
    out = ds.isel(lat=loc[0], lon=loc[1]).to_dataframe()[out_vars]
    good = pd.read_table('./metsim/data/validated_48.3125_-120.5625',
                         names=out_vars)
    good.index = out.index

    # Make sure the data comes out right
    check_data(out, good)
    ds.close()

    # Now do 3 hourly
    params['time_step'] = '180'
    ms = MetSim(params)
    ms.run()
    ds = ms.open_output()
    out = ds.isel(lat=loc[0], lon=loc[1]).to_dataframe()[out_vars]
    good = pd.read_table('./metsim/data/three_hourly_48.3125_-120.5625',
                         names=out_vars)
    good.index = out.index

    # Make sure the data comes out right (looser tolerance at 3-hourly)
    check_data(out, good, tol=0.2)
    ds.close()
def test_coordinate_dimension_matchup():
    """
    This test checks that MetSim correctely adds a coordinate
    if an input dataset is missing coordinate variables for the
    chunked dimensions.
    """
    # NOTE(review): var_rename is defined but never used in this test.
    var_rename = OrderedDict(
        latitude='lat', longitude='lon', mask='mask',
        elevation='elev', pptrate='prec', maxtemp='t_max', mintemp='t_min')
    filename = './examples/example_dimtest.conf'
    conf = io.read_config(DummyOpts(filename))
    conf['out_dir'] = tempfile.mkdtemp('results')
    ms = MetSim(conf)
    ds = xr.open_dataset('./metsim/data/dim_test.nc')
    # The raw input lacks the 'hru' coordinate; MetSim must add it.
    assert 'hru' not in ds.coords
    assert 'hru' in ms.met_data.coords
@pytest.mark.parametrize('kind', ['ascii', 'bin', 'nc',
                                  'constant_vars_ascii',
                                  'constant_vars_bin',
                                  'constant_vars_nc'])
def test_examples(kind):
    """Smoke-test each shipped example configuration end to end."""
    filename = './examples/example_{kind}.conf'.format(kind=kind)
    conf = io.read_config(DummyOpts(filename))
    # Redirect output into a scratch directory so parallel runs don't collide.
    out_dir = tempfile.mkdtemp('results')
    conf['out_dir'] = out_dir
    ms = MetSim(conf)
    ms.run()
    assert ms.open_output() is not None
def test_yaml_config():
    """Smoke-test reading configuration from a YAML file."""
    filename = './examples/example_yaml.yaml'
    conf = io.read_config(DummyOpts(filename))
    out_dir = tempfile.mkdtemp('results')
    conf['out_dir'] = out_dir
    ms = MetSim(conf)
    ms.run()
    assert ms.open_output() is not None
| arbennett/MetSim | metsim/tests/test_metsim.py | Python | gpl-3.0 | 15,361 | [
"NetCDF"
] | 67a3153b59868e2910547215fb111b1be05d389f3be6135d21d985a6d8f92d54 |
'''
Created on 03.09.2014
@author: Jan-Hendrik Prinz, David W.H. Swenson
'''
from . import range_logic
import abc
from openpathsampling.netcdfplus import StorableNamedObject
import numpy as np
import warnings
# TODO: Make Full and Empty be Singletons to avoid storing them several times!
def join_volumes(volume_list, name=None):
    """
    Make the union of a list of volumes. (Useful shortcut.)

    Parameters
    ----------
    volume_list : list of :class:`openpathsampling.Volume`
        the list to be joined together
    name : str or callable
        string for name, or callable that creates string for name from
        ``volume_list``

    Returns
    -------
    :class:`openpathsampling.UnionVolume`
        the union of the elements of the list, or EmptyVolume if list is
        empty
    """
    combined = EmptyVolume()
    # OR-ing with EmptyVolume simply yields the other operand, so folding
    # the list needs no special case for the first element.
    for vol in volume_list:
        combined = combined | vol
    if name is not None:
        try:
            name_str = name(volume_list)
        except TypeError:
            # ``name`` was a plain string, not a callable.
            name_str = name
        combined = combined.named(name_str)
    return combined
class Volume(StorableNamedObject):
    """
    A Volume describes a set of snapshots

    Subclasses implement ``__call__`` to report membership.  The boolean
    operators (``|``, ``^``, ``&``, ``-``, ``~``) build set-algebra
    combinations, collapsing the trivial cases involving ``EmptyVolume``
    and ``FullVolume`` immediately instead of building a combination.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        super(Volume, self).__init__()

    @abc.abstractmethod
    def __call__(self, snapshot):
        '''
        Returns `True` if the given snapshot is part of the defined Region
        '''
        return False # pragma: no cover

    def __str__(self):
        '''
        Returns a string representation of the volume
        '''
        return 'volume' # pragma: no cover

    # Defining __eq__ below would otherwise set __hash__ to None;
    # explicitly keep the parent class's hash.
    __hash__ = StorableNamedObject.__hash__

    def __or__(self, other):
        # Union: identical/full/empty operands collapse without wrapping.
        if self is other:
            return self
        elif type(other) is EmptyVolume:
            return self
        elif type(other) is FullVolume:
            return other
        else:
            return UnionVolume(self, other)

    def __xor__(self, other):
        # Symmetric difference: X ^ X is empty, X ^ full is the complement.
        if self is other:
            return EmptyVolume()
        elif type(other) is EmptyVolume:
            return self
        elif type(other) is FullVolume:
            return ~ self
        else:
            return SymmetricDifferenceVolume(self, other)

    def __and__(self, other):
        # Intersection: X & empty is empty, X & full is X.
        if self is other:
            return self
        elif type(other) is EmptyVolume:
            return other
        elif type(other) is FullVolume:
            return self
        else:
            return IntersectionVolume(self, other)

    def __sub__(self, other):
        # Relative complement: X - X and X - full are both empty.
        if self is other:
            return EmptyVolume()
        elif type(other) is EmptyVolume:
            return self
        elif type(other) is FullVolume:
            return EmptyVolume()
        else:
            return RelativeComplementVolume(self, other)

    def __invert__(self):
        return NegatedVolume(self)

    def __eq__(self, other):
        # Volumes compare equal when their string representations match.
        return str(self) == str(other)

    def __ne__(self, other):
        return not self == other
class VolumeCombination(Volume):
    """
    Logical combination of two volumes.

    Treat this as an abstract class; for storage purposes, always use the
    concrete subclasses in practice.
    """
    def __init__(self, volume1, volume2, fnc, str_fnc):
        super(VolumeCombination, self).__init__()
        self.volume1 = volume1
        self.volume2 = volume2
        self.fnc = fnc
        self.sfnc = str_fnc

    def __call__(self, snapshot):
        # Short circuit (following JHP's implementation in ensemble.py):
        # evaluate volume2 only if the combined result actually depends
        # on it for the already-known value of volume1.
        first = self.volume1(snapshot)
        on_true = self.fnc(first, True)
        on_false = self.fnc(first, False)
        if on_true == on_false:
            # result independent of volume2; skip evaluating it
            return on_true
        return self.fnc(first, self.volume2(snapshot))

    def __str__(self):
        return '(' + self.sfnc.format(str(self.volume1), str(self.volume2)) + ')'

    def to_dict(self):
        return {'volume1': self.volume1, 'volume2': self.volume2}
class UnionVolume(VolumeCombination):
    """Volume representing the union ("or") of two volumes."""
    def __init__(self, volume1, volume2):
        super(UnionVolume, self).__init__(
            volume1=volume1, volume2=volume2,
            fnc=lambda left, right: left or right,
            str_fnc='{0} or {1}')
class IntersectionVolume(VolumeCombination):
    """Volume representing the intersection ("and") of two volumes."""
    def __init__(self, volume1, volume2):
        super(IntersectionVolume, self).__init__(
            volume1=volume1, volume2=volume2,
            fnc=lambda left, right: left and right,
            str_fnc='{0} and {1}')
class SymmetricDifferenceVolume(VolumeCombination):
    """Volume representing the symmetric difference ("xor") of two volumes."""
    def __init__(self, volume1, volume2):
        super(SymmetricDifferenceVolume, self).__init__(
            volume1=volume1, volume2=volume2,
            fnc=lambda left, right: left ^ right,
            str_fnc='{0} xor {1}')
class RelativeComplementVolume(VolumeCombination):
    """Volume representing the relative complement ("subtraction") of two volumes."""
    def __init__(self, volume1, volume2):
        super(RelativeComplementVolume, self).__init__(
            volume1=volume1, volume2=volume2,
            fnc=lambda left, right: left and not right,
            str_fnc='{0} and not {1}')
class NegatedVolume(Volume):
    """Complement (logical "not") of a volume."""
    def __init__(self, volume):
        super(NegatedVolume, self).__init__()
        self.volume = volume

    def __call__(self, snapshot):
        # member exactly when the wrapped volume says it is not
        return not self.volume(snapshot)

    def __str__(self):
        return '(not {0})'.format(self.volume)
class EmptyVolume(Volume):
    """The empty volume: no snapshot can satisfy it."""
    def __init__(self):
        super(EmptyVolume, self).__init__()

    def __call__(self, snapshot):
        # nothing is ever a member of the empty set
        return False

    def __invert__(self):
        # complement of nothing is everything
        return FullVolume()

    def __and__(self, other):
        # empty AND anything => empty
        return self

    def __or__(self, other):
        # empty OR anything => the other volume
        return other

    def __xor__(self, other):
        # empty XOR anything => the other volume
        return other

    def __sub__(self, other):
        # empty minus anything => empty
        return self

    def __str__(self):
        return 'empty'
class FullVolume(Volume):
    """The universal volume: every snapshot satisfies it."""
    def __init__(self):
        super(FullVolume, self).__init__()

    def __call__(self, snapshot):
        # everything is a member of the universal set
        return True

    def __invert__(self):
        # complement of everything is nothing
        return EmptyVolume()

    def __and__(self, other):
        # full AND anything => the other volume
        return other

    def __or__(self, other):
        # full OR anything => full
        return self

    def __xor__(self, other):
        # full XOR anything => complement of the other volume
        return ~other

    def __sub__(self, other):
        # full minus anything => complement of the other volume
        return ~other

    def __str__(self):
        return 'all'
class CVDefinedVolume(Volume):
    """
    Volume defined by a range of a collective variable `collectivevariable`.

    Contains all snapshots `snap` for which `lambda_min <=
    collectivevariable(snap)` and `lambda_max > collectivevariable(snap)`
    (i.e. the half-open interval [lambda_min, lambda_max)).

    Parameters
    ----------
    collectivevariable : :class:`.CollectiveVariable`
        the CV to base the volume on
    lambda_min : float
        minimum value of the CV
    lambda_max : float
        maximum value of the CV
    """
    def __init__(self, collectivevariable, lambda_min=0.0, lambda_max=1.0):
        super(CVDefinedVolume, self).__init__()
        self.collectivevariable = collectivevariable
        # prefer the object's own __float__ (e.g. unit-carrying quantities);
        # fall back to the float() constructor for plain numbers/strings
        try:
            self.lambda_min = lambda_min.__float__()
        except AttributeError:
            self.lambda_min = float(lambda_min)
        try:
            self.lambda_max = lambda_max.__float__()
        except AttributeError:
            self.lambda_max = float(lambda_max)
        self._cv_returns_iterable = None  # used to raise warnings

    # Typically, the logical combinations are only done once. Because of
    # this, it is worth passing these through a check to speed up the logic.
    # To get all the usefulness of the range logic in a subclass, all you
    # should need to override is _copy_with_new_range (so that it inits any
    # extra info the subclass carries) and range_and/or/sub, so that they
    # return the correct behavior for the new subclass. Everything else
    # comes for free.

    @property
    def default_name(self):
        # e.g. "0.0<phi<1.0"
        return (str(self.lambda_min) + "<"
                + str(self.collectivevariable.name) + "<"
                + str(self.lambda_max))

    def _copy_with_new_range(self, lmin, lmax):
        """Shortcut to make a CVDefinedVolume with all parameters the same as
        this one except the range. This is useful for the range logic when
        dealing with subclasses: just override this function to copy extra
        information.
        """
        return CVDefinedVolume(self.collectivevariable, lmin, lmax)

    @staticmethod
    def range_and(amin, amax, bmin, bmax):
        # delegate to the non-periodic range logic
        return range_logic.range_and(amin, amax, bmin, bmax)

    @staticmethod
    def range_or(amin, amax, bmin, bmax):
        return range_logic.range_or(amin, amax, bmin, bmax)

    @staticmethod
    def range_sub(amin, amax, bmin, bmax):
        return range_logic.range_sub(amin, amax, bmin, bmax)

    def _lrange_to_Volume(self, lrange):
        """Takes results from one of the range_logic functions and returns
        the appropriate Volume.

        Parameters
        ----------
        lrange : None or 1 or list of 2-tuples
            Key to the volume to be returned: None returns the EmptyVolume, 1
            returns self, and a list of 2-tuples is __or__'d as (min,max) to
            make a VolumeCombinations

        Returns
        -------
        Volume
            appropriate volume according to lrange

        Raises
        ------
        ValueError
            if the input lrange is not an allowed value
        """
        if lrange is None:
            return EmptyVolume()
        elif lrange == 1:
            return self
        elif lrange == -1:
            return FullVolume()
        elif len(lrange) == 1:
            # a single (min, max) pair: same CV, new range
            return self._copy_with_new_range(lrange[0][0], lrange[0][1])
        elif len(lrange) == 2:
            # two disjoint ranges: union of the two pieces
            return UnionVolume(
                self._copy_with_new_range(lrange[0][0], lrange[0][1]),
                self._copy_with_new_range(lrange[1][0], lrange[1][1])
            )
        else:
            raise ValueError(
                "lrange value not understood: {0}".format(lrange)
            ) # pragma: no cover

    def __and__(self, other):
        # only use range logic if both volumes are the exact same type
        # on the same CV; otherwise fall back to the generic combination
        if (type(other) is type(self) and
                self.collectivevariable == other.collectivevariable):
            lminmax = self.range_and(self.lambda_min, self.lambda_max,
                                     other.lambda_min, other.lambda_max)
            return self._lrange_to_Volume(lminmax)
        else:
            return super(CVDefinedVolume, self).__and__(other)

    def __or__(self, other):
        if (type(other) is type(self) and
                self.collectivevariable == other.collectivevariable):
            lminmax = self.range_or(self.lambda_min, self.lambda_max,
                                    other.lambda_min, other.lambda_max)
            return self._lrange_to_Volume(lminmax)
        else:
            return super(CVDefinedVolume, self).__or__(other)

    def __xor__(self, other):
        if (type(other) is type(self) and
                self.collectivevariable == other.collectivevariable):
            # taking the shortcut here: xor == (union minus intersection)
            return (self | other) - (self & other)
        else:
            return super(CVDefinedVolume, self).__xor__(other)

    def __sub__(self, other):
        if (type(other) is type(self) and
                self.collectivevariable == other.collectivevariable):
            lminmax = self.range_sub(self.lambda_min, self.lambda_max,
                                     other.lambda_min, other.lambda_max)
            return self._lrange_to_Volume(lminmax)
        else:
            return super(CVDefinedVolume, self).__sub__(other)

    def _is_iterable(self, val):
        """Return True (and warn once via caller) if the CV value has a length."""
        try:
            # openmm.Quantity erroneously allows iter, so use len
            # besides, CVs shouldn't return generators
            _ = len(val)
        except TypeError:
            return False
        else:
            cv = self.collectivevariable
            warnings.warn("The CV '" + str(cv.name) + "' returns an "
                          "iterable. This may lead to problem in analysis.")
            return True

    def _get_cv_float(self, snapshot):
        """Evaluate the CV for `snapshot` and coerce the result to float.

        The iterable check (and its warning) is performed only on the
        first evaluation; the result is cached in _cv_returns_iterable.
        """
        val = self.collectivevariable(snapshot)
        if self._cv_returns_iterable is None:
            self._cv_returns_iterable = self._is_iterable(val)
        return val.__float__()

    def __call__(self, snapshot):
        l = self._get_cv_float(snapshot)
        # we explicitly test for infinity to allow the user to
        # define `lambda_min/max='inf'` also when using units
        # an openmm unit cannot be compared to a python infinite float
        if self.lambda_min != float('-inf') and self.lambda_min > l:
            return False
        if self.lambda_max != float('inf') and self.lambda_max <= l:
            return False
        return True

    def __str__(self):
        # NOTE: printed as a closed interval although membership in
        # __call__ is half-open ([min, max))
        return '{{x|{2}(x) in [{0:g}, {1:g}]}}'.format(
            self.lambda_min, self.lambda_max, self.collectivevariable.name)
class PeriodicCVDefinedVolume(CVDefinedVolume):
    """
    As with `CVDefinedVolume`, but for a periodic order parameter.

    Defines a Volume containing all states where collectivevariable, a periodic
    function wrapping into the range [period_min, period_max], is in the
    given range [lambda_min, lambda_max].

    Attributes
    ----------
    period_min : float (optional)
        minimum of the periodic domain
    period_max : float (optional)
        maximum of the periodic domain
    """
    # do not store/restore the derived `wrap` flag
    _excluded_attr = ['wrap']
    def __init__(
            self, collectivevariable, lambda_min=0.0, lambda_max=1.0,
            period_min=None, period_max=None):
        super(PeriodicCVDefinedVolume, self).__init__(collectivevariable,
                                                      lambda_min, lambda_max)
        self.period_min = period_min
        self.period_max = period_max
        # wrapping is only active when BOTH period bounds are given
        if (period_min is not None) and (period_max is not None):
            self._period_shift = period_min
            self._period_len = period_max - period_min
            if self.lambda_max - self.lambda_min > self._period_len:
                raise Exception("Range of volume larger than periodic bounds.")
            elif self.lambda_max-self.lambda_min == self._period_len:
                # this is only the case that we really have a FullVolume
                self.lambda_min = period_min
                self.lambda_max = period_max
                # hack: better to create factory, returning FullVolume
                # this hack: https://stackoverflow.com/questions/38541015/
                # (reassigning __class__ to a subclass whose __call__
                # always returns True makes this instance behave as a
                # full volume while keeping its stored attributes)
                class MonkeyPatch(type(self)):
                    def __call__(self, *arg, **kwarg):
                        return True
                self.__class__ = MonkeyPatch
            else:
                # wrap both bounds into the periodic domain; note this may
                # produce lambda_min > lambda_max, handled in __call__
                self.lambda_min = self.do_wrap(lambda_min)
                self.lambda_max = self.do_wrap(lambda_max)
            self.wrap = True
        else:
            self.wrap = False
    def do_wrap(self, value):
        """Wraps `value` into the periodic domain."""
        # this looks strange and mimics the modulo operation `%` while
        # being fully compatible for openmm quantities and plain python as
        # well working for ints and floats.
        val = value - self._period_shift
        # little trick to check for positivity without knowing the the units
        # or if it actually has units
        if val > val * 0:
            return value - int(val / self._period_len) * self._period_len
        else:
            wrapped = value + int((self._period_len - val) / self._period_len) \
                * self._period_len
            # NOTE(review): this compares against the period *length*
            # rather than period_max (= _period_shift + _period_len).
            # For period_min == 0 the two coincide, but for a shifted
            # domain (e.g. [-180, 180)) values that land exactly on
            # period_max appear not to be wrapped back down — confirm
            # whether the intended check is
            # `wrapped >= self._period_shift + self._period_len`.
            if wrapped >= self._period_len:
                wrapped -= self._period_len
            return wrapped
    # next few functions add support for range logic
    def _copy_with_new_range(self, lmin, lmax):
        # keep the periodic bounds when the range logic builds new volumes
        return PeriodicCVDefinedVolume(self.collectivevariable, lmin, lmax,
                                       self.period_min, self.period_max)
    @staticmethod
    def range_and(amin, amax, bmin, bmax):
        return range_logic.periodic_range_and(amin, amax, bmin, bmax)
    @staticmethod
    def range_or(amin, amax, bmin, bmax):
        return range_logic.periodic_range_or(amin, amax, bmin, bmax)
    @staticmethod
    def range_sub(amin, amax, bmin, bmax):
        return range_logic.periodic_range_sub(amin, amax, bmin, bmax)
    def __invert__(self):
        # on a periodic domain, the complement of [min, max) is simply
        # [max, min): consists of swapping max and min
        return PeriodicCVDefinedVolume(self.collectivevariable,
                                       self.lambda_max, self.lambda_min,
                                       self.period_min, self.period_max
                                       )
    def __call__(self, snapshot):
        l = self._get_cv_float(snapshot)
        if self.wrap:
            l = self.do_wrap(l)
        if self.lambda_min > self.lambda_max:
            # inverted range: the interval crosses the periodic boundary
            return l >= self.lambda_min or l < self.lambda_max
        else:
            return self.lambda_min <= l < self.lambda_max
    def __str__(self):
        if self.wrap:
            fcn = 'x|({0}(x) - {2:g}) % {1:g} + {2:g}'.format(
                self.collectivevariable.name,
                self._period_len, self._period_shift)
            if self.lambda_min < self.lambda_max:
                domain = '[{0:g}, {1:g}]'.format(
                    self.lambda_min, self.lambda_max)
            else:
                # boundary-crossing interval prints as a union of pieces
                domain = '[{0:g}, {1:g}] union [{2:g}, {3:g}]'.format(
                    self._period_shift, self.lambda_max,
                    self.lambda_min, self._period_shift+self._period_len)
            return '{'+fcn+' in '+domain+'}'
        else:
            return '{{x|{2}(x) [periodic] in [{0:g}, {1:g}]}}'.format(
                self.lambda_min, self.lambda_max,
                self.collectivevariable.name)
class VoronoiVolume(Volume):
    '''
    Volume given by a Voronoi cell specified by a set of centers

    Parameters
    ----------
    collectivevariable : MultiRMSDCV
        must be an MultiRMSDCV collectivevariable that returns several RMSDs
    state : int
        the index of the center for the chosen voronoi cell

    Attributes
    ----------
    collectivevariable : collectivevariable
        the collectivevariable object
    state : int
        the index of the center for the chosen voronoi cell
    '''

    def __init__(self, collectivevariable, state):
        super(VoronoiVolume, self).__init__()
        self.collectivevariable = collectivevariable
        self.state = state

    def cell(self, snapshot):
        '''
        Returns the index of the voronoi cell the snapshot is in

        Parameters
        ----------
        snapshot : :class:`opensampling.engines.BaseSnapshot`
            the snapshot to be tested

        Returns
        -------
        int
            index of the voronoi cell (-1 only if the CV returns no
            distances at all)
        '''
        distances = self.collectivevariable(snapshot)
        # Plain argmin over the distances. The previous implementation
        # compared against a hard-coded sentinel (1000000000.0), which
        # silently returned -1 whenever every distance exceeded 1e9;
        # tracking the running minimum explicitly works at any scale.
        min_idx = -1
        min_val = None
        for idx, dist in enumerate(distances):
            if min_val is None or dist < min_val:
                min_val = dist
                min_idx = idx
        return min_idx

    def __call__(self, snapshot, state=None):
        '''
        Returns `True` if snapshot belongs to voronoi cell in state

        Parameters
        ----------
        snapshot : :class:`opensampling.engines.BaseSnapshot`
            snapshot to be tested
        state : int or None
            index of the cell to be tested. If `None` (Default) then the
            internal self.state is used

        Returns
        -------
        bool
            returns `True` is snapshot is on the specified voronoi cell
        '''
        if state is None:
            state = self.state
        return self.cell(snapshot) == state
# class VolumeFactory(object):
# @staticmethod
# def _check_minmax(minvals, maxvals):
# # if one is an integer, convert it to a list
# if type(minvals) == int or type(minvals) == float:
# if type(maxvals) == list:
# minvals = [minvals]*len(maxvals)
# else:
# raise ValueError("minvals is a scalar; maxvals is not a list")
# elif type(maxvals) == int or type(maxvals) == float:
# if type(minvals) == list:
# maxvals = [maxvals]*len(minvals)
# else:
# raise ValueError("maxvals is a scalar; minvals is not a list")
# if len(minvals) != len(maxvals):
# raise ValueError("len(minvals) != len(maxvals)")
# return (minvals, maxvals)
# @staticmethod
# def CVRangeVolumeSet(op, minvals, maxvals):
# # TODO: clean up to only use min_i or max_i in name if necessary
# minvals, maxvals = VolumeFactory._check_minmax(minvals, maxvals)
# myset = []
# for (min_i, max_i) in zip(minvals, maxvals):
# volume = CVDefinedVolume(op, min_i, max_i)
# myset.append(volume)
# return myset
# @staticmethod
# def CVRangeVolumePeriodicSet(op, minvals, maxvals,
# period_min=None, period_max=None):
# minvals, maxvals = VolumeFactory._check_minmax(minvals, maxvals)
# myset = []
# for i in range(len(maxvals)):
# myset.append(PeriodicCVDefinedVolume(op, minvals[i], maxvals[i],
# period_min, period_max))
# return myset
| dwhswenson/openpathsampling | openpathsampling/volume.py | Python | mit | 21,931 | [
"OpenMM"
] | d8609a874f37ad776b343d551895b0a069917a79fa1a5bc143282e762bbe42f2 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os, datetime
def csvEnabled(input_tree):
    """
    Whether CSV output is enabled in the /Outputs block.
    Input:
        input_tree[InputTree]: The InputTree to inspect
    Return:
        bool: True if /Outputs is included and its "csv" parameter is "true"
    """
    outputs = input_tree.getBlockInfo("/Outputs")
    if outputs and outputs.included:
        p = outputs.getParamInfo("csv")
        # guard against a missing "csv" parameter (other functions in this
        # module guard param info the same way, e.g. "exodus" handling)
        return p is not None and p.value == "true"
    return False
def _getFileBase(outputs, inputfilename):
    """
    Get the file base to be written out based on parameters in /Outputs
    Input:
        outputs[BlockInfo]: corresponding to /Outputs
        inputfilename[str]: The input file name
    Return:
        (bool whether file_base was set, file base)
    """
    param = outputs.getParamInfo("file_base")
    # an explicit, non-empty file_base on an included /Outputs block wins
    if outputs.included and param and param.value:
        return True, param.value
    # otherwise fall back to the input file's name without its extension
    stem = os.path.splitext(os.path.basename(inputfilename))[0]
    return False, stem
def getPostprocessorFiles(input_tree, inputfilename):
    """
    Get a list of /Postprocessors files that will be written.
    Input:
        input_tree[InputTree]: The InputTree to get blocks from
        inputfilename[str]: The input file name
    Return:
        list[str]: file names
    """
    outputs = input_tree.getBlockInfo("/Outputs")
    was_set, base = _getFileBase(outputs, inputfilename)
    # default base gets the conventional "_out" suffix
    if not was_set:
        base = base + "_out"
    return ["%s.csv" % base]
def getVectorPostprocessorFiles(input_tree, inputfilename):
    """
    Get a list of /VectorPostprocessors files that will be written.
    Input:
        input_tree[InputTree]: The InputTree to get blocks from
        inputfilename[str]: The input file name
    Return:
        list[str]: file names (glob patterns, one per VPP child)
    """
    outputs = input_tree.getBlockInfo("/Outputs")
    was_set, base = _getFileBase(outputs, inputfilename)
    # default base gets the conventional "_out" suffix
    if not was_set:
        base = base + "_out"
    vpps = input_tree.getBlockInfo("/VectorPostprocessors")
    # VPPs write one file per timestep, hence the "_*" glob
    return ["%s_*.csv" % _getChildFileBase(base, child)
            for child in vpps.children.values()]
def _getChildFileBase(common_file_base, child):
    """
    Get the file base for outputs.
    Input:
        common_file_base[str]: The default file base
        child[BlockInfo]: Child node of /Outputs
    Return:
        str: file base
    """
    param = child.getParamInfo("file_base")
    # a child-level file_base overrides the default "<base>_<child>" form
    if param and param.value:
        return param.value
    return "%s_%s" % (common_file_base, child.name)
def getOutputFiles(input_tree, inputfilename):
    """
    Inspects the "/Outputs" node and gets a list of output files that the input file will write.
    Input:
        input_tree[InputTree]: The InputTree to get blocks from
        inputfilename[str]: The input file name
    Return:
        list[str]: Output filenames
    """
    outputs = input_tree.getBlockInfo("/Outputs")
    output_file_names = []
    is_set, common_file_base = _getFileBase(outputs, inputfilename)
    # top-level "exodus = true" writes <base>.e (or <base>_out.e when the
    # base was derived from the input file name rather than set explicitly)
    exodus = outputs.getParamInfo("exodus")
    if outputs.included and exodus and exodus.value == "true":
        if is_set:
            output_file_names.append("%s.e" % common_file_base)
        else:
            output_file_names.append("%s_out.e" % common_file_base)
    # each included Exodus sub-block contributes one more file
    for child in outputs.children.values():
        if not child.included:
            continue
        type_param = child.getParamInfo("type")
        if type_param.value != "Exodus":
            continue
        file_base = _getChildFileBase(common_file_base, child)
        oversample = child.getParamInfo("oversample")
        append_oversample = child.getParamInfo("append_oversample")
        if oversample and oversample.value != "false" and append_oversample and append_oversample.value != "false":
            file_base = file_base + '_oversample'
        append_date = child.getParamInfo("append_date")
        if append_date and append_date.value != "false":
            utc = datetime.datetime.utcnow()
            date_format = child.getParamInfo("append_date_format")
            d_str = utc.isoformat()
            if date_format and date_format.value != "":
                try:
                    d_str = utc.strftime(date_format.value)
                except Exception:
                    # invalid user-supplied strftime format: fall back to
                    # the ISO timestamp. (Previously a bare "except:",
                    # which also swallowed KeyboardInterrupt/SystemExit.)
                    pass
            file_base = "%s_%s" % (file_base, d_str)
        output_file_names.append(file_base + '.e')
    return output_file_names
| nuclear-wizard/moose | python/peacock/Input/OutputNames.py | Python | lgpl-2.1 | 4,849 | [
"MOOSE"
] | ae4ca919bfc48cb42709a4d8fad3e553510ff60d3c929d1cdeb500a2eeec2731 |
from ase import *
from gpaw import GPAW
from gpaw.utilities import equal
# Cubic box of edge a = 4.8 Angstrom; with the default grid spacing
# h = 0.2 this gives N = 4.8 / 0.2 = 24 grid points per axis.
a = 4.8 # => N = 4.8 / 0.2 = 24
# Single spin-polarized carbon atom (magmom=2), displaced slightly from
# the cell center, in a non-periodic box.
loa = Atoms([Atom('C', [a / 2 + .3, a / 2 -.1, a / 2], magmom=2)],
            pbc=False,
            cell=(a, a, a))
p = []    # potential energies, one entry per `hosts` setting
exx = []  # exact-exchange energies, one entry per `hosts` setting
i = 0
# Run the identical calculation serially (hosts=1) and in parallel
# (hosts=4); the parallel results must reproduce the serial reference.
for hosts in [1, 4]:
    calc = GPAW(convergence={'eigenstates': 1e-6}, hosts=hosts,
                txt='exx_parallel.txt')
    loa.set_calculator(calc)
    p.append(loa.get_potential_energy())
    exx.append(calc.get_exact_exchange())
    print 'number of CPUs :', hosts
    print 'Potential energy :', p[i]
    print 'Exchange energy :', exx[i]
    print ''
    i += 1
# Compare every run against the first (serial) one to within 0.01
# (presumably eV — TODO confirm the unit `equal` assumes).
for i in range(1, len(exx)):
    equal(p[i], p[0], 1e-2)
    equal(exx[i], exx[0], 1e-2)
## number of CPUs : 1
## Potential energy : -1.07206007502
## Exchange energy : -137.443595686
## number of CPUs : 2
## Potential energy : -1.07206007502
## Exchange energy : -137.443595686
## number of CPUs : 3
## Potential energy : -1.0721486372
## Exchange energy : -137.405085235
## number of CPUs : 4
## Potential energy : -1.07194681834
## Exchange energy : -137.441377715
| qsnake/gpaw | oldtest/exx_parallel.py | Python | gpl-3.0 | 1,132 | [
"ASE",
"GPAW"
] | 0511777454c9e69271cb16f3ded6f5988c477edff5307984aac7a3c116bca382 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# URL configuration: order matters — Django uses the first pattern that
# matches the requested path.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("propertytrack.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# static() serves user-uploaded media files; it is a no-op unless DEBUG
# is enabled, so this is development-only behavior.
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string view paths are the pre-Django-1.10 style and are
    # removed in later Django versions — confirm the project's Django version.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| angryjoe/propertytrack | config/urls.py | Python | bsd-3-clause | 1,235 | [
"VisIt"
] | 83e98e19f44f1b2d52d13438da137c742c87c6d6959c704cb50826400ca13856 |
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to
# submit large numbers of jobs on supercomputers. It provides a python interface to physical input,
# such as crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential
# programs. It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
# -*- coding: utf-8 -*-
""" Pwscf functional """
__docformat__ = "restructuredtext en"
__all__ = ['Pwscf']
from ..espresso import logger
from traitlets import HasTraits, Instance
from ..tools import stateless, assign_attributes
from . import Namelist
from .card import Card
from .pwscf_namelists import Control, System, Electrons, Ions, Cell, alias
from .namelists import input_transform
class Pwscf(HasTraits):
""" Wraps up Pwscf in python """
control = Instance(Control, args=(), kw={}, allow_none=False)
system = Instance(System, args=(), kw={}, allow_none=False)
electrons = Instance(Electrons, args=(), kw={}, allow_none=False)
ions = Instance(Ions, args=(), kw={}, allow_none=False)
cell = Instance(Cell, args=(), kw={}, allow_none=False)
k_points = Instance(Card, args=('K_POINTS',), kw={'subtitle': 'gamma'}, allow_none=False,
help="Defines the set of k-points for the calculation")
kpoints = alias(k_points)
__private_cards = ['atomic_species']
""" Cards that are handled differently by Pwscf
For instance, atomic_species is handled the species attribute.
"""
def __init__(self, **kwargs):
from . import Namelist
super(Pwscf, self).__init__(**kwargs)
self.__namelists = Namelist()
self.__cards = {}
self.species = {}
""" Dictionary of species that can be used in the calculation
A specie is an object with at least a 'filename' attribute pointing to the
pseudo-potential.
"""
def __getattr__(self, name):
""" look into extra cards and namelists """
if name in self.__cards:
return self.__cards[name]
elif hasattr(self.__namelists, name):
return getattr(self.__namelists, name)
return super(Pwscf, self).__getattribute__(name)
def add_specie(self, name, pseudo, **kwargs):
""" Adds a specie to the current known species """
from .specie import Specie
self.species[name] = Specie(name, pseudo, **kwargs)
def write(self, stream=None, structure=None, **kwargs):
""" Writes Pwscf input
- if stream is None (default), then returns a string containing namelist in fortran
format
- if stream is a string, then it should a path to a file
- otherwise, stream is assumed to be a stream of some sort, with a `write` method
"""
from .. import error
from .namelists import InputTransform
from .misc import write_pwscf_input
from copy import copy
namelist = copy(self.__namelists)
cards = copy(self.__cards)
for key in self.trait_names():
value = getattr(self, key)
if isinstance(value, Namelist):
setattr(namelist, key, value)
elif isinstance(value, Card):
if value.name in cards:
raise error.internal("Found two cards with the same name")
cards[value.name] = value
cards = list(cards.values())
f90namelist = namelist.namelist(structure=structure, **kwargs)
for transform in self.__class__.__dict__.values():
if isinstance(transform, InputTransform):
logger.debug("Transforming input using method %s" % transform.method.__name__)
transform.method(self, f90namelist, cards=cards, structure=structure, **kwargs)
return write_pwscf_input(f90namelist, cards, stream)
@input_transform
def __add_structure_to_input(self, dictionary=None, cards=None, structure=None, **kwargs):
from .structure_handling import add_structure
if structure is None:
return
add_structure(structure, dictionary, cards)
atomic_species = self._write_atomic_species_card(structure)
# filter cards in-place: we need to modify the input sequence itself
for i, u in enumerate(list(cards)):
if u.name in 'atomic_species':
cards.pop(i)
cards.append(atomic_species)
@input_transform
def __delete_ions_and_cells_if_not_relaxing(self, dictionary, **kwargs):
if self.control.calculation not in ['relax', 'md', 'vc-relax', 'vc-md']:
dictionary.pop('ions', None)
if self.control.calculation not in ['vc-relax', 'vc-md']:
dictionary.pop('cell', None)
def read(self, filename, clear=True):
""" Read from a file """
from ..misc import local_path
from .card import read_cards
# read namelists first
if clear:
self.__namelists.clear()
self.__cards = {}
for name in self.trait_names():
value = getattr(self, name)
if hasattr(value, 'clear'):
value.clear()
filename = local_path(filename)
logger.info("%s: Reading from file %s", self.__class__.__name__, filename)
self.__namelists.read(filename)
traits = set(self.trait_names()).intersection(self.__namelists.names())
for traitname in traits:
newtrait = getattr(self.__namelists, traitname)
delattr(self.__namelists, traitname)
trait = getattr(self, traitname)
for key in newtrait.names():
setattr(trait, key, getattr(newtrait, key))
# Then read all cards
for card in read_cards(filename):
if card.name in self.trait_names():
getattr(self, card.name).subtitle = card.subtitle
getattr(self, card.name).value = card.value
elif card.name in self.__private_cards:
if hasattr(self, '_read_%s_card' % card.name):
getattr(self, '_read_%s_card' % card.name)(card)
else:
logger.debug('%s is handled internally' % card.name)
else:
self.__cards[card.name] = card
def add_card(self, name, value=None, subtitle=None):
""" Adds a new card, or sets the value of an existing one """
if isinstance(getattr(self, name, None), Card):
card = getattr(self, name)
elif card.name in self.__private_cards:
logger.warn('%s is handled internally' % card.name)
return
else:
logger.info("%s: Adding new card %s", self.__class__.__name__, name)
card = Card(name)
self.__cards[name] = card
card.subtitle = subtitle
card.value = value
def add_namelist(self, name, **kwargs):
""" Adds a new namelist, or sets the value of an existing one """
from .namelists import Namelist
if isinstance(getattr(self, name, None), Namelist):
namelist = getattr(self, name)
namelist.clear()
else:
logger.info("%s: Adding new namelist %s", self.__class__.__name__, name)
namelist = Namelist()
setattr(self.__namelists, name, namelist)
for key, value in kwargs.items():
setattr(namelist, key, value)
@stateless
@assign_attributes(ignore=['overwrite', 'comm', 'restart', 'program'])
def iter(self, structure, outdir=".", comm=None, overwrite=False, restart=None, **kwargs):
""" Allows asynchronous Pwscf calculations
This is a generator which yields two types of objects:
.. code:: python
yield Program(program="pw.x", outdir=outdir)
yield Extract(outdir=outdir)
- :py:class:`~pylada.process.program.ProgramProcess`: once started, this process will
run an actual Pwscf calculation.
- :py:attr:`Extract`: once the program has been runned, and extraction object is
yielded, in order that the results of the run can be analyzed.
:param structure:
:py:class:`~pylada.crystal.Structure` structure to compute.
:param outdir:
Output directory where the results should be stored. This
directory will be checked for restart status, eg whether
calculations already exist. If None, then results are stored in
current working directory.
:param comm:
Holds arguments for executing VASP externally.
:param overwrite:
If True, will overwrite pre-existing results.
If False, will check whether a successful calculation exists. If
one does, then does not execute.
:param kwargs:
Any attribute of the Pwscf instance can be overridden for
the duration of this call by passing it as keyword argument:
>>> for program in vasp.iter(structure, outdir, sigma=0.5):
...
The above would call VASP_ with smearing of 0.5 eV (unless a
successfull calculation already exists, in which case the
calculations are *not* overwritten).
:yields: A process and/or an extraction object, as described above.
:raise RuntimeError: when computations do not complete.
:raise IOError: when outdir exists but is not a directory.
.. note::
This function is stateless. It expects that self and structure can
be deepcopied correctly.
.. warning::
This will never overwrite successfull Pwscf calculation, even if the
parameters to the call are different.
"""
from ..misc import local_path
from .. import pwscf_program
from ..process.program import ProgramProcess
outdir = local_path(outdir)
logger.info('Running Pwscf in: %s' % outdir)
outdir.ensure(dir=True)
# check for pre-existing and successful run.
if not overwrite:
# Check with this instance's Extract, cos it is this calculation we shall
# do here. Derived instance's Extract might be checking for other stuff.
extract = self.Extract(str(outdir))
if extract.success:
yield extract # in which case, returns extraction object.
return
# if restarting, gets structure, sets charge density and wavefuntion at start
# otherwise start passes structure back to caller
structure = self._restarting(structure, restart, outdir)
# copies/creates file environment for calculation.
self._bring_up(structure, outdir, comm=comm, overwrite=overwrite)
# figures out what program to call.
program = kwargs.get('program', getattr(self, 'program', pwscf_program))
if program == None:
raise RuntimeError('program was not set in the espresso functional')
logger.info("Pwscf program: %s" % program)
cmdline = program.rstrip().lstrip().split()[1:]
program = program.rstrip().lstrip().split()[0]
def onfinish(process, error):
self._bring_down(outdir, structure)
stdout = self.control.prefix + ".out"
stderr = self.control.prefix + ".err"
stdin = self.control.prefix + ".in"
yield ProgramProcess(program, cmdline=cmdline, outdir=str(outdir), onfinish=onfinish,
stdin=stdin, stdout=stdout, stderr=stderr,
dompi=comm is not None)
# yields final extraction object.
yield self.Extract(str(outdir))
def pseudos_do_exist(self, structure, verbose=False):
    """ Checks that a pseudo-potential file exists for each atomic specie.

        Returns True only if every pseudo file is found in
        ``self.control.pseudo_dir``.

        :raises error.KeyError:
            if a specie present in the structure has no entry in
            ``self.species``.
    """
    from .specie import Specie
    from .. import error
    # Each distinct atom type must be declared and have its pseudo on disk.
    for name in {atom.type for atom in structure}:
        if name not in self.species:
            raise error.KeyError(
                "No specie defined for %s: no way to get pseudopotential" % name)
        pseudo = self.species[name].pseudo
        if not Specie(name, pseudo).file_exists(self.control.pseudo_dir):
            if verbose:
                logger.critical("Specie %s: pseudo = %s" % (name, pseudo))
            return False
    return True
def _restarting(self, structure, restart, outdir):
    """ Steps to take when restarting.

        If ``restart`` is None, hands ``structure`` straight back to the
        caller.  Otherwise copies the charge density and wavefunction data
        of the prior run into ``outdir``, adjusts ``startingpot`` and
        ``startingwfc`` accordingly, and returns the prior run's structure.
    """
    # NOTE: shadows the builtins max/abs for the rest of this function.
    from numpy import max, abs
    from .. import error
    if restart is None:
        return structure
    # normalize: restart could be an Extract object, or a path
    restart = self.Extract(restart)
    if not restart.success:
        logger.critical("Cannot restart from unsuccessful calculation")
        raise error.RuntimeError("Cannot restart from unsuccessful calculation")
    # Pwscf stores its data in "<prefix>.save"; copy it under *our* prefix.
    save_dir = restart.abspath.join('%s.save' % restart.prefix)
    if save_dir.check(dir=True):
        logger.info("Restarting from data in %s" % save_dir)
        save_dir.copy(outdir.join('%s.save' % self.control.prefix))
    # Relative cell change between start and end of the prior run: the old
    # charge density is only reused when the cell barely moved.
    dist = (max(abs(restart.structure.cell - restart.initial_structure.cell))
            / max(abs(restart.structure.cell)))
    if save_dir.join('charge-density.dat').check(file=True) and dist < 1e-3:
        self.electrons.startingpot = 'file'
    elif self.electrons.startingpot == 'file':
        logger.warning("No charge density found, setting startingpot to atomic")
        self.electrons.startingpot = 'atomic'
    # NOTE(review): uses self.control.prefix here while save_dir above used
    # restart.prefix -- confirm the wavefunction file in the restart
    # directory is really named after the *current* prefix.
    wfcden = restart.abspath.join('%s.wfc1' % self.control.prefix)
    if wfcden.check(file=True):
        logger.info("Restarting from wavefunction file %s" % wfcden)
        wfcden.copy(outdir.join(wfcden.basename))
        self.electrons.startingwfc = 'file'
    elif self.electrons.startingwfc == 'file':
        logger.warning("No wavefunction file found, setting startingwfc to atomic+random")
        self.electrons.startingwfc = 'atomic+random'
    return restart.structure
def _bring_up(self, structure, outdir, **kwargs):
    """ Prepares the output directory for an actual Pwscf run.

        Writes the input deck, checks the pseudo-potential files and drops
        a marker file signalling a computation in flight.
    """
    from ..misc import chdir
    logger.info('Preparing directory to run Pwscf: %s ' % outdir)
    with chdir(outdir):
        # Input file is named after the control prefix.
        input_stream = outdir.join("%s.in" % self.control.prefix)
        self.write(structure=structure, stream=input_stream,
                   outdir=outdir, **kwargs)
        # Complain loudly (verbose=True) about missing pseudo files.
        self.pseudos_do_exist(structure, verbose=True)
        # Marker removed again by _bring_down once the run finishes.
        outdir.join('.pylada_is_running').ensure(file=True)
def _write_atomic_species_card(self, structure):
    """ Builds the ATOMIC_SPECIES card: one (name, mass, pseudo) per specie.

        :raises error.RuntimeError:
            if a specie in the structure has no entry in ``self.species``.
    """
    from quantities import atomic_mass_unit
    from .. import periodic_table, error
    from .card import Card
    card = Card('atomic_species', value="")
    # Also doubles as a check that every specie has a pseudo defined.
    for name in {atom.type for atom in structure}:
        if name not in self.species:
            raise error.RuntimeError(
                "No specie defined for %s: no way to get pseudopotential" % name)
        specie = self.species[name]
        # Mass: explicit on the specie if present, otherwise from the
        # periodic table, with 1 as a last-resort fallback.
        mass = getattr(specie, 'mass', None)
        if mass is None:
            mass = getattr(getattr(periodic_table, name, None), 'mass', 1)
        # Quantities are converted to plain floats in atomic mass units.
        if hasattr(mass, 'rescale'):
            mass = float(mass.rescale(atomic_mass_unit))
        card.value += "%s %s %s\n" % (name, mass, specie.pseudo)
    return card
def _read_atomic_species_card(self, card):
    """ Updates the species dictionary from an ATOMIC_SPECIES card.

        Species already known get their pseudo and mass overwritten;
        unknown ones are registered through ``add_specie``.
    """
    for entry in card.value.strip().split('\n'):
        name, mass, pseudo = entry.split()
        if name in self.species:
            known = self.species[name]
            known.pseudo = pseudo
            known.mass = float(mass)
        else:
            self.add_specie(name, pseudo, mass=float(mass))
def _bring_down(self, directory, structure):
    """ Tears down after a run: removes the in-flight marker file. """
    from ..misc import local_path
    marker = local_path(directory).join('.pylada_is_running')
    if marker.check(file=True):
        marker.remove()
@classmethod
def Extract(class_, outdir, **kwargs):
    """ Returns an extraction object for ``outdir``.

        An ``outdir`` that already quacks like an extraction object
        (carries both ``success`` and ``directory``) is passed through
        untouched.
    """
    from .extract import Extract
    already_extracted = hasattr(outdir, 'success') and hasattr(outdir, 'directory')
    return outdir if already_extracted else Extract(outdir, **kwargs)
def __repr__(self):
    """ Python-executable representation of this functional.

        Emits a sequence of ``pwscf.<attr> = ...`` statements which, when
        executed, rebuild an equivalent instance.
    """
    from numpy import abs
    from quantities import atomic_mass_unit
    result = "pwscf = %s()\n" % self.__class__.__name__
    # Plain public attributes; species are handled separately below.
    for k, v in self.__dict__.items():
        if k[0] != '_' and k != 'species':
            result += "pwscf.%s = %s\n" % (k, repr(v))
    # One add_specie() call per specie; mass only when not the default.
    for name, value in self.species.items():
        result += "pwscf.add_specie('%s', '%s'" % (name, value.pseudo)
        if abs(value.mass - atomic_mass_unit) > 1e-12:
            result += ", mass=%s" % float(value.mass.rescale(atomic_mass_unit))
        for k, v in value.__dict__.items():
            if k[0] != '_' and k not in ['name', 'pseudo']:
                result += ", %s=%s" % (k, repr(v))
        result += ")\n"
    # Cards (note: name-mangled private container).
    for k, v in self.__cards.items():
        if k[0] != '_':
            result += "pwscf.%s = %s\n" % (k, repr(v))
    # Namelists added at runtime.
    for name in self.__namelists.names():
        result += "pwscf.add_namelist(%s" % name
        value = getattr(self, name)
        for k in value.names():
            v = getattr(value, k)
            result += ", %s=%s" % (k, repr(v))
        # NOTE(review): no trailing newline here, unlike the other
        # statements above -- confirm this is intentional.
        result += ")"
    for name in self.trait_names():
        value = getattr(self, name)
        if hasattr(value, 'printattr'):
            result += value.printattr("pwscf." + name)
    return result
def __call__(self, structure, outdir=None, comm=None, overwrite=False, **kwargs):
    """ Blocking call to pwscf.

        Drives :py:meth:`iter` to completion, waiting for the external
        program whenever one is launched.

        :returns: An extraction object of type :py:attr:`Extract`.
        :raises error.RuntimeError: if the calculation did not succeed.
    """
    from .. import error
    iterator = self.iter(
        structure, outdir=outdir, comm=comm, overwrite=overwrite, **kwargs)
    for step in iterator:
        # A prior successful run was found: nothing to execute.
        if getattr(step, 'success', False):
            continue
        # Neither an extraction object nor a runnable process: bail out.
        if not hasattr(step, 'start'):
            break
        # Launch the external executable and block until it finishes.
        step.start(comm)
        step.wait()
    # The last yielded object should be a successful extraction.
    if not step.success:
        print(step)
        raise error.RuntimeError("Pwscf failed to execute correctly.")
    return step
| pylada/pylada-light | src/pylada/espresso/functional.py | Python | gpl-3.0 | 20,437 | [
"CRYSTAL",
"ESPResSo",
"VASP"
] | 90b2d93a8ff1414ad3bd291c14af229d302c2b2ce19e8b23bb1f7092a057a425 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for seqlib, an NGS analysis toolkit."""

from setuptools import setup
from setuptools.extension import Extension
from seqlib.version import __version__
from Cython.Build import cythonize

# Cython extension holding the alignment routines.
EXTENSIONS = [Extension('seqlib.align', ['seqlib/align.pyx'])]

CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'Topic :: Scientific/Engineering :: Bio-Informatics',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3',
]

REQUIREMENTS = [
    'future',
    'requests',
    'pysam>=0.8.4',
    'pybedtools>=0.7.8',
    'docopt',
    'beautifulsoup4',
    'lxml',
    'Cython',
]

SCRIPTS = [
    'bin/fetch_geoinfo.py',
    'bin/extract_junc.py',
    'bin/extract_region.py',
]

setup(name='seqlib',
      version=__version__,
      description='NGS analysis toolkits',
      author='Xiao-Ou Zhang',
      author_email='kepbod@gmail.com',
      url='https://github.com/kepbod/seqlib',
      license='MIT',
      classifiers=CLASSIFIERS,
      keywords='NGS',
      packages=['seqlib'],
      package_data={'seqlib': ['data/*.msg']},
      ext_modules=cythonize(EXTENSIONS),
      install_requires=REQUIREMENTS,
      scripts=SCRIPTS)
| kepbod/seqlib | setup.py | Python | mit | 1,265 | [
"pysam"
] | c7ddf4678af42dc3baed4fc8f5477a242f26754161ade6d8d4d631c03c3a5664 |
#===============================================================================
#
# CUDAGenerator.py
#
# This file is part of ANNarchy.
#
# Copyright (C) 2016-2021 Julien Vitay <julien.vitay@gmail.com>,
# Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
"""
The CUDAGenerator is responsible for the complete code generation process
in ANNarchy to support CUDA devices. Generate the header for a Population
object to run either on a Nvidia GPU using Nvidia SDK > 5.0 and CC > 2.0
"""
import re
import ANNarchy
from copy import deepcopy
from ANNarchy.core import Global
from ANNarchy.core.Population import Population
from ANNarchy.core.PopulationView import PopulationView
from ANNarchy.generator.Utils import generate_equation_code, tabify, check_and_apply_pow_fix, determine_idx_type_for_projection
from ANNarchy.generator.Population.PopulationGenerator import PopulationGenerator
from ANNarchy.generator.Projection.ProjectionGenerator import ProjectionGenerator, get_bounds
from ANNarchy.generator.Projection.CUDA import *
class CUDAGenerator(ProjectionGenerator):
"""
As stated in module description, inherits from ProjectionGenerator
and implements abstract functions.
"""
def __init__(self, profile_generator, net_id):
    """ Initializes the CUDA projection generator.

        The super() call walks the whole base-class chain: first
        ProjectionGenerator, then the CUDA connectivity bases.
    """
    super(CUDAGenerator, self).__init__(profile_generator, net_id)
def header_struct(self, proj, annarchy_dir):
    """ Generates the projection header file (proj<id>.hpp).

        The file contains the c-style structure with all data members of
        the projection; the device kernels themselves are returned as code
        snippets for later inclusion in ANNarchy.cu.

        :param proj: Projection object to generate code for.
        :param annarchy_dir: root folder of the code generation; the header
            is written below ``generate/net<net_id>/``.
        :returns: dictionary of code snippets (psp/update/post-event
            headers, bodies and calls, includes, transfers).
    """
    # Initial state
    self._templates = deepcopy(BaseTemplates.cuda_templates)
    self._template_ids = {}

    # Select the C++ connectivity template
    sparse_matrix_include, sparse_matrix_format, sparse_matrix_args, single_matrix = self._select_sparse_matrix_format(proj)

    # configure Connectivity base class
    self._configure_template_ids(proj)

    # Initialize launch configuration
    init_launch_config = self._generate_launch_config(proj)

    # Generate declarations and accessors for the variables
    decl, accessor = self._declaration_accessors(proj, single_matrix)

    # concurrent streams
    decl['cuda_stream'] = BaseTemplates.cuda_stream

    # Initiliaze the projection
    init_weights, init_delays, init_parameters_variables = self._init_parameters_variables(proj, single_matrix)

    variables_body, variables_header, variables_call = self._update_synapse(proj)

    # Update the random distributions
    init_rng = self._init_random_distributions(proj)

    # Post event
    post_event_body, post_event_header, post_event_call = self._post_event(proj)

    # Compute sum is the trickiest part
    psp_header, psp_body, psp_call = self._computesum_rate(proj) if proj.synapse_type.type == 'rate' else self._computesum_spiking(proj)

    # Detect event-driven variables
    has_event_driven = False
    for var in proj.synapse_type.description['variables']:
        if var['method'] == 'event-driven':
            has_event_driven = True

    # Detect delays to eventually generate the code
    has_delay = proj.max_delay > 1

    # Connectivity template
    if 'declare_connectivity_matrix' not in proj._specific_template.keys():
        # Default case: fill the generated connectivity initializer.
        connector_call = self._connectivity_init(proj, sparse_matrix_format, sparse_matrix_args) % {
            'sparse_format': sparse_matrix_format,
            'init_weights': init_weights,
            'init_delays': init_delays,
            'rng_idx': "[0]" if single_matrix else "",
            'add_args': "",
            'num_threads': "",
            'float_prec': Global.config["precision"],
            'idx_type': determine_idx_type_for_projection(proj)[0]
        }
        declare_connectivity_matrix = ""
        access_connectivity_matrix = ""
    else:
        # User-provided connectivity overrides the generated one.
        sparse_matrix_format = "SpecificConnectivity"
        sparse_matrix_args = ""
        connector_call = ""
        declare_connectivity_matrix = proj._specific_template['declare_connectivity_matrix']
        access_connectivity_matrix = proj._specific_template['access_connectivity_matrix']

    # Memory transfers
    host_device_transfer, device_host_transfer = self._memory_transfers(proj)

    # Memory management
    determine_size_in_bytes = self._determine_size_in_bytes(proj)
    clear_container = self._clear_container(proj)

    # Local functions
    host_local_func, device_local_func = self._local_functions(proj)
    decl['parameters_variables'] += host_local_func

    # Profiling
    if self._prof_gen:
        include_profile = """#include "Profiling.h"\n"""
        declare_profile, init_profile = self._prof_gen.generate_init_projection(proj)
    else:
        include_profile = ""
        init_profile = ""
        declare_profile = ""

    # Additional info (overwritten)
    include_additional = ""
    struct_additional = ""
    init_additional = ""
    access_additional = ""

    if 'include_additional' in proj._specific_template.keys():
        include_additional = proj._specific_template['include_additional']
    if 'struct_additional' in proj._specific_template.keys():
        struct_additional = proj._specific_template['struct_additional']
    if 'init_additional' in proj._specific_template.keys():
        init_additional = proj._specific_template['init_additional']
    if 'access_additional' in proj._specific_template.keys():
        access_additional = proj._specific_template['access_additional']

    final_code = self._templates['projection_header'] % {
        # version tag
        'annarchy_version': ANNarchy.__release__,
        # fill code templates
        'id_pre': proj.pre.id,
        'id_post': proj.post.id,
        'id_proj': proj.id,
        'name_pre': proj.pre.name,
        'name_post': proj.post.name,
        'target': proj.target,
        'float_prec': Global.config['precision'],
        'sparse_matrix_include': sparse_matrix_include,
        'sparse_format': sparse_matrix_format,
        'sparse_format_args': sparse_matrix_args,
        'include_additional': include_additional,
        'include_profile': include_profile,
        'struct_additional': struct_additional,
        'declare_connectivity_matrix': declare_connectivity_matrix,
        'access_connectivity_matrix': access_connectivity_matrix,
        'declare_delay': decl['declare_delay'] if has_delay else "",
        'declare_event_driven': decl['event_driven'] if has_event_driven else "",
        'declare_rng': decl['rng'],
        'declare_parameters_variables': decl['parameters_variables'],
        'declare_cuda_stream': decl['cuda_stream'],
        'declare_additional': decl['additional'],
        'declare_profile': declare_profile,
        'connector_call': connector_call,
        'init_weights': init_weights,
        'init_event_driven': "",
        'init_rng': init_rng,
        'init_launch_config': init_launch_config,
        'init_parameters_variables': init_parameters_variables,
        'init_additional': init_additional,
        'init_profile': init_profile,
        'access_parameters_variables': accessor,
        'access_additional': access_additional,
        'host_to_device': host_device_transfer,
        'device_to_host': device_host_transfer,
        'determine_size': determine_size_in_bytes,
        'clear_container': clear_container
    }

    # Store the file in generate ( will be compared with files contained
    # in build/ later on )
    with open(annarchy_dir+'/generate/net'+str(self._net_id)+'/proj'+str(proj.id)+'.hpp', 'w') as ofile:
        ofile.write(final_code)

    # Build dictionary for inclusions in ANNarchy.cu
    proj_desc = {
        'include': """#include "proj%(id)s.hpp"\n""" % {'id': proj.id},
        'extern': """extern ProjStruct%(id)s proj%(id)s;\n"""% {'id': proj.id},
        'instance': """ProjStruct%(id)s proj%(id)s;\n"""% {'id': proj.id},
        'init': """    proj%(id)s.init_projection();\n""" % {'id' : proj.id}
    }

    proj_desc['psp_header'] = psp_header
    proj_desc['psp_body'] = psp_body
    proj_desc['psp_call'] = psp_call
    proj_desc['custom_func'] = device_local_func

    proj_desc['update_synapse_header'] = variables_header
    proj_desc['update_synapse_body'] = variables_body
    proj_desc['update_synapse_call'] = variables_call

    proj_desc['postevent_header'] = post_event_header
    proj_desc['postevent_body'] = post_event_body
    proj_desc['postevent_call'] = post_event_call

    proj_desc['host_to_device'] = tabify("proj%(id)s.host_to_device();" % {'id':proj.id}, 1)+"\n"
    proj_desc['device_to_host'] = tabify("proj%(id)s.device_to_host();" % {'id':proj.id}, 1)+"\n"

    return proj_desc
def _configure_template_ids(self, proj):
    """ Selects the template dictionaries matching the projection's
        sparse-matrix storage format and storage order.

        :raises NotImplementedError: for an unsupported storage order.
        :raises Global.InvalidConfiguration: for an unknown storage format.
    """
    fmt = proj._storage_format
    order = proj._storage_order

    if fmt == "csr":
        self._templates.update(CSR_CUDA.conn_templates)
        if order != "post_to_pre":
            raise NotImplementedError
        self._template_ids.update(CSR_CUDA.conn_ids)
        # TODO: as this value is hardcoded, this will lead to a recompile
        # if the delay is modified -> do we want this ? we could also use
        # [this->delay-1]
        self._template_ids.update({
            'delay_u': '[' + str(proj.uniform_delay - 1) + ']'  # uniform delay
        })
    elif fmt == "bsr":
        self._templates.update(BSR_CUDA.conn_templates)
        self._template_ids.update(BSR_CUDA.conn_ids)
    elif fmt == "coo":
        self._templates.update(COO_CUDA.conn_templates)
        if order != "post_to_pre":
            raise NotImplementedError
        self._template_ids.update(COO_CUDA.conn_ids)
    elif fmt == "ellr":
        self._templates.update(ELLR_CUDA.conn_templates)
        if order != "post_to_pre":
            raise NotImplementedError
        self._template_ids.update(ELLR_CUDA.conn_ids)
    elif fmt == "ell":
        self._templates.update(ELL_CUDA.conn_templates)
        if order != "post_to_pre":
            raise NotImplementedError
        self._template_ids.update(ELL_CUDA.conn_ids)
    elif fmt == "hyb":
        # Indices must be set locally for each part
        self._templates.update(HYB_CUDA.conn_templates)
    elif fmt == "dense":
        self._templates.update(Dense_CUDA.conn_templates)
        if order != "post_to_pre":
            raise NotImplementedError
        self._template_ids.update(Dense_CUDA.conn_ids)
    else:
        raise Global.InvalidConfiguration(" The storage_format="+str(proj._storage_format)+" is not available on CUDA devices")
def _generate_launch_config(self, proj):
    """ Renders the kernel launch-configuration snippet for *proj*.

        TODO: multiple targets???
    """
    return self._templates['launch_config'] % {'id_proj': proj.id}
def _computesum_rate(self, proj):
    """ Generates the weighted-sum (psp) kernel for rate-coded projections.

        :returns: a 3-tuple of code strings:

        * header: kernel prototypes
        * body: kernel implementation
        * call: kernel call
    """
    # Specific projection
    if 'psp_header' in proj._specific_template.keys() and \
        'psp_body' in proj._specific_template.keys() and \
        'psp_call' in proj._specific_template.keys():
        return proj._specific_template['psp_header'], proj._specific_template['psp_body'], proj._specific_template['psp_call']

    # Dictionary of keywords to transform the parsed equations
    ids = deepcopy(self._template_ids)

    # Some adjustments to spare single used local variables
    if proj._storage_format == "ellr":
        ids['post_index'] = "[rank_post[i]]"
        ids['pre_index'] = "[rank_pre[j*post_size+i]]"
    elif proj._storage_format == "bsr":
        ids['pre_prefix'] = "loc_pre_"
        ids['pre_index'] = "[col]"

    # Dependencies
    dependencies = list(set(proj.synapse_type.description['dependencies']['pre']))

    #
    # Retrieve the PSP
    add_args_header = ""
    add_args_call = ""
    if not 'psp' in proj.synapse_type.description.keys(): # default
        # NOTE(review): the '%(preprefix)s' key (with a trailing dot) does
        # not match the '%(pre_prefix)s' id used elsewhere -- confirm this
        # substitution key is intended.
        psp = """%(preprefix)s.r%(pre_index)s * w%(local_index)s;"""

        add_args_header += "const %(float_prec)s* __restrict__ pre_r, const %(float_prec)s* __restrict__ w" % {'float_prec': Global.config['precision']}
        add_args_call = "pop%(id_pre)s.gpu_r, proj%(id_proj)s.gpu_w " % {'id_proj': proj.id, 'id_pre': proj.pre.id}
    else: # custom psp
        psp = (proj.synapse_type.description['psp']['cpp'])

        # update dependencies: synaptic attributes used by the psp
        for dep in proj.synapse_type.description['psp']['dependencies']:
            _, attr = self._get_attr_and_type(proj, dep)
            attr_ids = {
                'id_proj': proj.id,
                'type': attr['ctype'],
                'name': attr['name']
            }
            add_args_header += ", const %(type)s* __restrict__ %(name)s" % attr_ids
            add_args_call += ", proj%(id_proj)s.gpu_%(name)s" % attr_ids

        # pre-synaptic population attributes used by the psp
        for dep in list(set(proj.synapse_type.description['dependencies']['pre'])):
            _, attr = PopulationGenerator._get_attr_and_type(proj.pre, dep)
            attr_ids = {
                'id_pre': proj.pre.id,
                'type': attr['ctype'],
                'name': attr['name']
            }
            add_args_header += ", %(type)s* pre_%(name)s" % attr_ids
            add_args_call += ", pop%(id_pre)s.gpu_%(name)s" % attr_ids

    # Special case where w is a single value
    if proj._has_single_weight():
        psp = re.sub(
            r'([^\w]+)w%\(local_index\)s',
            r'\1w',
            ' ' + psp
        )

    # Allow the use of global variables in psp (issue60)
    for var in dependencies:
        if var in proj.pre.neuron_type.description['global']:
            psp = psp.replace("%(pre_prefix)s"+var+"%(pre_index)s", "%(pre_prefix)s"+var+"%(global_index)s")

    # connectivity, yet only CSR
    conn_header = ""
    conn_call = ""

    #
    # finish the kernel etc.
    operation = proj.synapse_type.operation

    if proj._storage_format != "hyb":
        idx_type, _, size_type, _ = determine_idx_type_for_projection(proj)

        id_dict = {
            'id_proj': proj.id,
            'id_pre': proj.pre.id,
            'id_post': proj.post.id,
            'idx_type': idx_type,
            'size_type': size_type
        }

        # connectivity
        conn_header = self._templates['conn_header'] % id_dict
        conn_call = self._templates['conn_call'] % id_dict

        body_code = self._templates['rate_psp']['body'][operation] % {
            'float_prec': Global.config['precision'],
            'idx_type': idx_type,
            'size_type': size_type,
            'id_proj': proj.id,
            'conn_args': conn_header,
            'target_arg': "sum_"+proj.target,
            'add_args': add_args_header,
            'psp': psp % ids,
            'thread_init': self._templates['rate_psp']['thread_init'][Global.config['precision']][operation],
            'post_index': ids['post_index']
        }
        header_code = self._templates['rate_psp']['header'] % {
            'float_prec': Global.config['precision'],
            'id': proj.id,
            'conn_args': conn_header,
            'target_arg': "sum_"+proj.target,
            'add_args': add_args_header
        }
        call_code = self._templates['rate_psp']['call'] % {
            'id_proj': proj.id,
            'id_pre': proj.pre.id,
            'id_post': proj.post.id,
            'conn_args': conn_call,
            'target': proj.target,
            'target_arg': ", pop%(id_post)s.gpu__sum_%(target)s" % {'id_post': proj.post.id, 'target': proj.target},
            'add_args': add_args_call,
            'float_prec': Global.config['precision'],
            'idx_type': idx_type
        }
    else:
        # Should be equal to ProjectionGenerator._configure_template_ids()
        idx_type, _, size_type, _ = determine_idx_type_for_projection(proj)

        id_dict = {
            'id_proj': proj.id,
            'id_pre': proj.pre.id,
            'id_post': proj.post.id,
            'idx_type': idx_type,
            'size_type': size_type
        }

        #
        # ELLPACK - partition
        conn_header = ELL_CUDA.conn_templates['conn_header'] % id_dict
        conn_call = ELL_CUDA.conn_templates['conn_call'] % id_dict

        ell_ids = {
            'idx_type': idx_type,
            'local_index': "[j*post_size+i]",
            'semiglobal_index': '[i]',
            'global_index': '[0]',
            'pre_index': '[rank_pre[j*post_size+i]]',
            'post_index': '[rank_post[i]]',
            'pre_prefix': 'pre_',
            'post_prefix': 'post_'
        }
        # NOTE(review): thread_init is taken from ELLR_CUDA here while the
        # rest of this partition uses ELL_CUDA -- confirm this sharing is
        # intended.
        body_code = ELL_CUDA.conn_templates['rate_psp']['body'][operation] % {
            'idx_type': idx_type,
            'float_prec': Global.config['precision'],
            'id_proj': proj.id,
            'conn_args': conn_header,
            'target_arg': "sum_"+proj.target,
            'add_args': add_args_header,
            'psp': psp % ell_ids,
            'thread_init': ELLR_CUDA.conn_templates['rate_psp']['thread_init'][Global.config['precision']][operation],
            'post_index': ell_ids['post_index']
        }
        header_code = ELL_CUDA.conn_templates['rate_psp']['header'] % {
            'idx_type': idx_type,
            'float_prec': Global.config['precision'],
            'id': proj.id,
            'conn_args': conn_header,
            'target_arg': "sum_"+proj.target,
            'add_args': add_args_header
        }

        #
        # Coordinate - partition
        conn_header = COO_CUDA.conn_templates['conn_header'] % id_dict
        conn_call = COO_CUDA.conn_templates['conn_call'] % id_dict

        coo_ids = {
            'local_index': "[j]",
            'semiglobal_index': '[i]',
            'global_index': '[0]',
            'pre_index': '[column_indices[j]]',
            'post_index': '[row_indices[j]]',
            'pre_prefix': 'pre_',
            'post_prefix': 'post_',
        }
        body_code += COO_CUDA.conn_templates['rate_psp']['body'][operation] % {
            'float_prec': Global.config['precision'],
            'idx_type': idx_type,
            'size_type': size_type,
            'id_proj': proj.id,
            'conn_args': conn_header,
            'target_arg': "sum_"+proj.target,
            'add_args': add_args_header,
            'psp': psp % coo_ids,
            'thread_init': COO_CUDA.conn_templates['rate_psp']['thread_init'][Global.config['precision']][operation],
            'post_index': coo_ids['post_index']
        }
        header_code += COO_CUDA.conn_templates['rate_psp']['header'] % {
            'float_prec': Global.config['precision'],
            'id': proj.id,
            'conn_args': conn_header,
            'target_arg': "sum_"+proj.target,
            'add_args': add_args_header
        }

        # update dependencies
        # NOTE(review): reads description['psp'] unconditionally -- confirm
        # the 'hyb' format is only ever used with a custom psp.
        add_args_call_coo = add_args_call
        add_args_call_ell = add_args_call
        for dep in proj.synapse_type.description['psp']['dependencies']:
            add_args_call_coo = add_args_call_coo.replace("gpu_"+dep+",", "gpu_"+dep+"->coo,")
            add_args_call_ell = add_args_call_ell.replace("gpu_"+dep+",", "gpu_"+dep+"->ell,")

        call_code = HYB_CUDA.conn_templates['rate_psp']['call'] % {
            'id_proj': proj.id,
            'id_pre': proj.pre.id,
            'id_post': proj.post.id,
            'conn_args': conn_call,
            'target': proj.target,
            'target_arg': ", pop%(id_post)s.gpu__sum_%(target)s" % {'id_post': proj.post.id, 'target': proj.target},
            'add_args_coo': add_args_call_coo,
            'add_args_ell': add_args_call_ell,
            'float_prec': Global.config['precision']
        }

    # Take delays into account if any
    if proj.max_delay > 1:
        # Delayed variables
        if isinstance(proj.pre, PopulationView):
            delayed_variables = proj.pre.population.delayed_variables
        else:
            delayed_variables = proj.pre.delayed_variables

        id_pre = str(proj.pre.id)
        for var in sorted(list(set(delayed_variables))):
            call_code = call_code.replace("pop"+id_pre+".gpu_"+var, "pop"+id_pre+".gpu_delayed_"+var+"[proj"+str(proj.id)+".delay-1]")

    # Profiling
    if self._prof_gen:
        call_code = self._prof_gen.annotate_computesum_rate(proj, call_code)

    return header_code, body_code, call_code
def _computesum_spiking(self, proj):
"""
Generate code for the spike propagation. As ANNarchy supports a set of
different data structures, this method split in up into several sub
functions.
In contrast to _computesum_rate() the spike propagation kernel needs
to implement the signal transmission (event-driven as well as continuous)
and also the equations filled in the 'pre-spike' field of the synapse
description.
"""
# Specific template ?
if 'header' in proj._specific_template.keys() and \
'body' in proj._specific_template.keys() and \
'call' in proj._specific_template.keys():
try:
header = proj._specific_template['header']
body = proj._specific_template['body']
call = proj._specific_template['call']
except KeyError:
Global._error('header,spike_count body and call should be overwritten')
return header, body, call
# some variables needed for the final templates
psp_code = ""
kernel_args = ""
kernel_args_call = ""
pre_spike_code = ""
kernel_deps = []
if proj.max_delay > 1 and proj.uniform_delay == -1:
Global._error("Non-uniform delays are not supported yet on GPUs.")
idx_type, _, size_type, _ = determine_idx_type_for_projection(proj)
# some basic definitions
ids = {
# identifiers
'id_proj' : proj.id,
'id_post': proj.post.id,
'id_pre': proj.pre.id,
# common for all equations
'local_index': "[syn_idx]",
'semiglobal_index': '[post_rank]',
'global_index': '[0]',
'float_prec': Global.config['precision'],
# psp specific
'pre_prefix': 'pre_',
'post_prefix': 'post_',
'pre_index': '[col_idx[syn_idx]]',
'post_index': '[post_rank]',
# CPP types
'idx_type': idx_type,
'size_type': size_type
}
#
# All statements in the 'pre_spike' field of synapse description
#
for var in proj.synapse_type.description['pre_spike']:
if var['name'] == "g_target": # synaptic transmission
# compute psp
psp_code += "%(float_prec)s tmp = %(psp)s\n" % {
'psp': var['cpp'].split('=')[1] % ids,
'float_prec': Global.config['precision']
}
# apply to all targets
target_list = proj.target if isinstance(proj.target, list) else [proj.target]
for target in sorted(list(set(target_list))):
psp_code += "atomicAdd(&g_%(target)s[post_rank], tmp);\n" % {'target': target}
else:
condition = ""
# Check conditions to update the variable
if var['name'] == 'w': # Surround it by the learning flag
condition = "plasticity"
# Flags avoids pre-spike evaluation when post fires at the same time
if 'unless_post' in var['flags']:
simultaneous = "pop%(id_pre)s_last_spike[_pr] != pop%(id_post)s_last_spike[%(semiglobal_index)s]" % ids
if condition == "":
condition = simultaneous
else:
condition += "&&(" + simultaneous + ")"
eq_dict = {
'eq': var['eq'],
'cpp': var['cpp'],
'bounds': get_bounds(var),
'condition': condition,
}
# Generate the code
if condition != "":
pre_spike_code += """
// unless_post can prevent evaluation of presynaptic variables
if(%(condition)s){
// %(eq)s
%(cpp)s
%(bounds)s
}
""" % eq_dict
else: # Normal synaptic variable
pre_spike_code += """
// %(eq)s
%(cpp)s
%(bounds)s""" % eq_dict
# Update the dependencies
kernel_deps.append(var['name']) # right side
for dep in var['dependencies']: # left side
kernel_deps.append(dep)
#
# Event-driven integration of synaptic variables
#
has_exact = False
event_driven_code = ''
for var in proj.synapse_type.description['variables']:
if var['method'] == 'event-driven':
has_exact = True
event_dict = {
'eq': var['eq'],
'exact': var['cpp'].replace('(t)', '(t-1)')
}
event_driven_code += """
// %(eq)s
%(exact)s
""" % event_dict
# add the dependencies to kernel dependencies
for dep in var['dependencies']:
kernel_deps.append(dep)
# Does an event-driven variable occur?
if has_exact:
event_driven_code += """
// Update the last event for the synapse
_last_event%(local_index)s = t;
"""
# event-driven requires access to last event variable
kernel_args += ", long* _last_event"
kernel_args_call += ", proj%(id_proj)s._gpu_last_event" % ids
# Add pre- and post-synaptic population dependencies
pre_post_deps = list(set(proj.synapse_type.description['dependencies']['pre'] + proj.synapse_type.description['dependencies']['post']))
pre_post_args = self._gen_kernel_args(proj, pre_post_deps, pre_post_deps)
kernel_args += pre_post_args[0]
kernel_args_call += pre_post_args[1]
# Add synaptic variables to kernel arguments
kernel_deps = list(set(kernel_deps)) # sort out doublings
for dep in kernel_deps:
if dep == "w" or dep == "g_target":
# already contained
continue
attr_type, attr_dict = self._get_attr_and_type(proj, dep)
attr_ids = {
'id_proj': proj.id,
'name': attr_dict['name'],
'type': attr_dict['ctype']
}
if attr_type == 'par' and attr_dict['locality'] == "global":
kernel_args += ", const %(type)s %(name)s" % attr_ids
kernel_args_call += ", proj%(id_proj)s.%(name)s" % attr_ids
# replace any occurences of this parameter
if event_driven_code.strip() != '':
event_driven_code = event_driven_code.replace(attr_dict['name']+'%(global_index)s', attr_dict['name'])
if pre_spike_code.strip() != '':
pre_spike_code = pre_spike_code.replace(attr_dict['name']+'%(global_index)s', attr_dict['name'])
else:
kernel_args += ", %(type)s* %(name)s" % attr_ids
kernel_args_call += ", proj%(id_proj)s.gpu_%(name)s" % attr_ids
#
# Finally, fill the templates,
# we start with the event-driven component.
#
if len(pre_spike_code) == 0 and len(psp_code) == 0:
header = ""
body = ""
call = ""
else:
# select the correct template
template = self._templates['spike_transmission']['event_driven'][proj._storage_order]
# Connectivity description, we need to read-out the view
# which represents the pre-synaptic entries which means
# columns in post-to-pre and rows for pre-to-post orientation.
if proj._storage_order == "post_to_pre":
conn_header = "%(size_type)s* col_ptr, %(idx_type)s* row_idx, %(idx_type)s* inv_idx, %(float_prec)s *w" % ids
conn_call = "proj%(id_proj)s.gpu_col_ptr, proj%(id_proj)s.gpu_row_idx, proj%(id_proj)s.gpu_inv_idx, proj%(id_proj)s.gpu_w" % ids
else:
conn_header = "%(size_type)s* row_ptr, %(idx_type)s* col_idx, %(float_prec)s *w" % ids
conn_call = "proj%(id_proj)s._gpu_row_ptr, proj%(id_proj)s._gpu_col_idx, proj%(id_proj)s.gpu_w" % ids
# Population sizes
pre_size = proj.pre.size if isinstance(proj.pre, Population) else proj.pre.population.size
post_size = proj.post.size if isinstance(proj.post, Population) else proj.post.population.size
# PSP targets
targets_call = ""
targets_header = ""
target_list = proj.target if isinstance(proj.target, list) else [proj.target]
for target in target_list:
targets_call += ", pop%(id_post)s.gpu_g_"+target
targets_header += (", %(float_prec)s* g_"+target) % {'float_prec': Global.config['precision']}
# finalize call, body and header
call = template['call'] % {
'id_proj': proj.id,
'id_pre': proj.pre.id,
'id_post': proj.post.id,
'target': target_list[0],
'kernel_args': kernel_args_call % {'id_post': proj.post.id, 'target': target},
'conn_args': conn_call + targets_call % {'id_post': proj.post.id}
}
body = template['body'] % {
'id': proj.id,
'float_prec': Global.config['precision'],
'conn_arg': conn_header + targets_header,
'kernel_args': kernel_args,
'event_driven': tabify(event_driven_code % ids, 2),
'psp': tabify(psp_code, 3),
'pre_event': tabify(pre_spike_code % ids, 3),
'pre_size': pre_size,
'post_size': post_size,
}
header = template['header'] % {
'id': proj.id,
'float_prec': Global.config['precision'],
'conn_header': conn_header + targets_header,
'kernel_args': kernel_args
}
# If the synaptic transmission is not event-based,
# we need to add a rate-coded-like kernel.
if 'psp' in proj.synapse_type.description.keys():
# transfrom psp equation
psp_code = proj.synapse_type.description['psp']['cpp']
# update dependencies
for dep in proj.synapse_type.description['psp']['dependencies']:
if dep == "w":
continue
_, attr = self._get_attr_and_type(proj, dep)
attr_ids = {
'id_proj': proj.id,
'type': attr['ctype'],
'name': attr['name']
}
kernel_args += ", %(type)s* %(name)s" % attr_ids
kernel_args_call += ", proj%(id_proj)s.gpu_%(name)s" % attr_ids
psp_code = proj.synapse_type.description['psp']['cpp'] % ids
# select the correct template
template = self._templates['spike_transmission']['continous'][proj._storage_order]
call += template['call'] % {
'id_proj': proj.id,
'id_pre': proj.pre.id,
'id_post': proj.post.id,
'target_arg': ', pop%(id_post)s.gpu_g_%(target)s' % {'id_post': proj.post.id, 'target': proj.target},
'target': proj.target,
'kernel_args': kernel_args_call,
'float_prec': Global.config['precision']
}
body += template['body'] % {
'id_proj': proj.id,
'target_arg': proj.target,
'kernel_args': kernel_args,
'psp': psp_code,
'pre_code': tabify(pre_spike_code % ids, 3),
'float_prec': Global.config['precision']
}
header += template['header'] % {
'id': proj.id,
'kernel_args': kernel_args,
'target_arg': 'g_'+proj.target,
'float_prec': Global.config['precision']
}
return header, body, call
def _declaration_accessors(self, proj, single_matrix):
    """Extend the basic declaration statements with a CUDA stream entry.

    Delegates to the base ProjectionGenerator implementation and then adds
    the CUDA stream declaration template under the 'cuda_stream' key.
    """
    decl, acc = ProjectionGenerator._declaration_accessors(self, proj, single_matrix)
    decl.update(cuda_stream=BaseTemplates.cuda_stream)
    return decl, acc
@staticmethod
def _select_deps(proj, locality):
"""
Dependencies of synaptic equations consist of several components:
* access to pre- or post-population
* variables / parameters of the projection
* pre- or post-spike event
Return:
* syn_deps list of all dependencies
* neur_deps list of dependencies part of neurons
"""
syn_deps = []
# Access to pre- or postsynaptic neurons
neur_deps = list(set(proj.synapse_type.description['dependencies']['pre'] +
proj.synapse_type.description['dependencies']['post']))
for dep in neur_deps:
syn_deps.append(dep)
# Variables
for var in proj.synapse_type.description['variables']:
if var['eq'] == '':
continue # nothing to do here
if var['locality'] == locality:
syn_deps.append(var['name'])
for dep in var['dependencies']:
syn_deps.append(dep)
# Random distributions
for rd in proj.synapse_type.description['random_distributions']:
for dep in rd['dependencies']:
syn_deps += dep
syn_deps = list(set(syn_deps))
return syn_deps, neur_deps
@staticmethod
def _gen_kernel_args(proj, pop_deps, deps):
    """
    The header and function definitions as well as the call statement need
    to be extended with the additional variables.

    :param proj: the projection whose kernel signature is assembled.
    :param pop_deps: dependency names that belong to the pre-/post-synaptic
                     populations (subset of *deps*).
    :param deps: all dependency names to expose to the kernel.
    :returns: tuple (kernel_args, kernel_args_call) — the formal-parameter
              suffix for the kernel declaration and the matching argument
              suffix for the host-side call. Both begin with ", ".
    """
    kernel_args = ""
    kernel_args_call = ""

    for dep in deps:
        # The variable dep is part of pre-/post population
        if dep in pop_deps:
            # NOTE: a name may occur in both populations; the two branches
            # below are deliberately independent (no elif), so both a
            # pre_<name> and a post_<name> argument can be emitted.
            if dep in proj.synapse_type.description['dependencies']['pre']:
                attr_type, attr_dict = PopulationGenerator._get_attr_and_type(proj.pre, dep)
                ids = {
                    'type': attr_dict['ctype'],
                    'name': attr_dict['name'],
                    'id': proj.pre.id
                }
                kernel_args += ", %(type)s* pre_%(name)s" % ids
                kernel_args_call += ", pop%(id)s.gpu_%(name)s" % ids

            if dep in proj.synapse_type.description['dependencies']['post']:
                attr_type, attr_dict = PopulationGenerator._get_attr_and_type(proj.post, dep)
                ids = {
                    'type': attr_dict['ctype'],
                    'name': attr_dict['name'],
                    'id': proj.post.id
                }
                kernel_args += ", %(type)s* post_%(name)s" % ids
                kernel_args_call += ", pop%(id)s.gpu_%(name)s" % ids

        # The variable dep is part of the projection
        else:
            attr_type, attr_dict = ProjectionGenerator._get_attr_and_type(proj, dep)

            if attr_type == "par":
                ids = {
                    'id_proj': proj.id,
                    'type': attr_dict['ctype'],
                    'name': attr_dict['name']
                }
                # Global parameters are passed by value, local ones as device pointer.
                if dep in proj.synapse_type.description['global']:
                    kernel_args += ", const %(type)s %(name)s" % ids
                    kernel_args_call += ", proj%(id_proj)s.%(name)s" % ids
                else:
                    kernel_args += ", %(type)s* %(name)s" % ids
                    kernel_args_call += ", proj%(id_proj)s.gpu_%(name)s" % ids

            elif attr_type == "var":
                ids = {
                    'id_proj': proj.id,
                    'type': attr_dict['ctype'],
                    'name': attr_dict['name']
                }
                kernel_args += ", %(type)s* %(name)s" % ids
                kernel_args_call += ", proj%(id_proj)s.gpu_%(name)s" % ids

            elif attr_type == "rand":
                # Random number generator states (cuRAND) for this distribution.
                ids = {
                    'id_proj': proj.id,
                    'type': 'curandState',
                    'name': attr_dict['name']
                }
                kernel_args += ", %(type)s* state_%(name)s" % ids
                kernel_args_call += ", proj%(id_proj)s.gpu_%(name)s" % ids

            else:
                raise ValueError("attr_type for variable " + dep + " is invalid")

    #
    # global operations related to pre- and post-synaptic operations
    # (e.g. mean(pre.r)); passed by value as pre__<func>_<name> / post__<func>_<name>.
    for glop in proj.synapse_type.description['pre_global_operations']:
        attr = PopulationGenerator._get_attr(proj.pre, glop['variable'])
        ids = {
            'id': proj.pre.id,
            'name': glop['variable'],
            'type': attr['ctype'],
            'func': glop['function']
        }
        kernel_args += ", %(type)s pre__%(func)s_%(name)s" % ids
        kernel_args_call += ", pop%(id)s._%(func)s_%(name)s" % ids

    for glop in proj.synapse_type.description['post_global_operations']:
        attr = PopulationGenerator._get_attr(proj.post, glop['variable'])
        ids = {
            'id': proj.post.id,
            'name': glop['variable'],
            'type': attr['ctype'],
            'func': glop['function']
        }
        kernel_args += ", %(type)s post__%(func)s_%(name)s" % ids
        kernel_args_call += ", pop%(id)s._%(func)s_%(name)s" % ids

    #
    # event-driven spike synapses require the access to last_spike member
    # of pre- and post-synaptic populations. These are *prepended* so they
    # always come first in the extended argument list.
    if proj.synapse_type.type == "spike":
        kernel_args = ", long int* pre_last_spike, long int* post_last_spike" + kernel_args
        kernel_args_call = ", pop%(id_pre)s.gpu_last_spike, pop%(id_post)s.gpu_last_spike" % {'id_pre': proj.pre.id, 'id_post': proj.post.id} + kernel_args_call

    return kernel_args, kernel_args_call
def _header_structural_plasticity(self, proj):
    """Abort code generation: structural plasticity is not implemented for CUDA."""
    Global._error("Structural Plasticity is not supported on GPUs yet.")
def _local_functions(self, proj):
"""
Definition of user-defined local functions attached to
a neuron. These functions will take place in the
ANNarchyDevice.cu file.
As the local functions can be occur repeatadly in the same file,
there are modified with pop[id]_ to unique them.
Return:
* host_define, device_define
"""
# Local functions
if len(proj.synapse_type.description['functions']) == 0:
return "", ""
host_code = ""
device_code = ""
for func in proj.synapse_type.description['functions']:
cpp_func = func['cpp'] + '\n'
host_code += cpp_func
# TODO: improve code
if (Global.config["precision"]=="float"):
device_code += cpp_func.replace('float' + func['name'], '__device__ float proj%(id)s_%(func)s' % {'id': proj.id, 'func': func['name']})
else:
device_code += cpp_func.replace('double '+ func['name'], '__device__ double proj%(id)s_%(func)s' % {'id': proj.id, 'func':func['name']})
return host_code, check_and_apply_pow_fix(device_code)
def _replace_local_funcs(self, proj, glob_eqs, semiglobal_eqs, loc_eqs):
"""
As the local functions can be occur repeatadly in the same file,
there are modified with proj[id]_ to unique them. Now we need
to adjust the call accordingly.
"""
for func in proj.synapse_type.description['functions']:
search_term = r"%(name)s\([^\(]*\)" % {'name': func['name']}
func_occur = re.findall(search_term, glob_eqs)
for term in func_occur:
glob_eqs = loc_eqs.replace(term, term.replace(func['name'], 'proj'+str(proj.id)+'_'+func['name']))
func_occur = re.findall(search_term, semiglobal_eqs)
for term in func_occur:
semiglobal_eqs = loc_eqs.replace(term, term.replace(func['name'], 'proj'+str(proj.id)+'_'+func['name']))
func_occur = re.findall(search_term, loc_eqs)
for term in func_occur:
loc_eqs = loc_eqs.replace(term, term.replace(func['name'], 'proj'+str(proj.id)+'_'+func['name']))
return glob_eqs, semiglobal_eqs, loc_eqs
def _replace_random(self, loc_eqs, loc_idx, glob_eqs, random_distributions):
    """
    Replace the variables rand_%(id)s in the parsed equations
    by the corresponding curand... draw.

    :param loc_eqs: code block of the local (per-synapse) equations.
    :param loc_idx: index string for local arrays (e.g. "[j]").
    :param glob_eqs: code block of the global equations.
    :param random_distributions: list of distribution descriptors.
    :returns: the updated (loc_eqs, glob_eqs) pair.

    CLEANUP: removed two leftover debug print() statements that dumped
    every distribution and the full equation block to stdout, and the
    unused `semi_pre` accumulator.
    """
    # double precision methods have a postfix
    prec_extension = "" if Global.config['precision'] == "float" else "_double"

    loc_pre = ""
    glob_pre = ""

    for dist in random_distributions:
        if dist['dist'] == "Uniform":
            dist_ids = {
                'postfix': prec_extension,
                'rd': dist['name'],
                'min': dist['args'].split(',')[0],
                'max': dist['args'].split(',')[1],
                'local_index': loc_idx
            }

            if dist["locality"] == "local":
                term = """( curand_uniform%(postfix)s( &state_%(rd)s%(local_index)s ) * (%(max)s - %(min)s) + %(min)s )""" % dist_ids
                loc_pre += "%(prec)s %(name)s = %(term)s;" % {'prec': Global.config['precision'], 'name': dist['name'], 'term': term}

                # suppress local index
                loc_eqs = loc_eqs.replace(dist['name'] + loc_idx, dist['name'])
            else:
                # HD (17th May 2021): this path can not be reached as the parser rejects equations like:
                #                     dw/dt = -w * Uniform(0,.1) : init=1, midpoint
                raise NotImplementedError

        elif dist['dist'] == "Normal":
            dist_ids = {
                'postfix': prec_extension, 'rd': dist['name'],
                'mean': dist['args'].split(",")[0],
                'sigma': dist['args'].split(",")[1]
            }

            if dist["locality"] == "local":
                # NOTE(review): the state index is hard-coded to "[j]" here
                # (and for LogNormal below) while Uniform uses loc_idx —
                # confirm whether these branches should use loc_idx too.
                term = """( curand_normal%(postfix)s( &state_%(rd)s[j] ) * %(sigma)s + %(mean)s )""" % dist_ids
                loc_pre += "%(prec)s %(name)s = %(term)s;" % {'prec': Global.config['precision'], 'name': dist['name'], 'term': term}

                # suppress local index
                loc_eqs = loc_eqs.replace(dist['name'] + "[j]", dist['name'])
            else:
                # HD (17th May 2021): this path can not be reached as the parser rejects equations like:
                #                     dw/dt = -w * Uniform(0,.1) : init=1, midpoint
                raise NotImplementedError

        elif dist['dist'] == "LogNormal":
            dist_ids = {
                'postfix': prec_extension, 'rd': dist['name'],
                'mean': dist['args'].split(',')[0],
                'std_dev': dist['args'].split(',')[1]
            }

            if dist["locality"] == "local":
                term = """( curand_log_normal%(postfix)s( &state_%(rd)s[j], %(mean)s, %(std_dev)s) )""" % dist_ids
                loc_pre += "%(prec)s %(name)s = %(term)s;" % {'prec': Global.config['precision'], 'name': dist['name'], 'term': term}

                # suppress local index
                loc_eqs = loc_eqs.replace(dist['name'] + "[j]", dist['name'])
            else:
                # HD (17th May 2021): this path can not be reached as the parser rejects equations like:
                #                     dw/dt = -w * Uniform(0,.1) : init=1, midpoint
                raise NotImplementedError

        else:
            Global._error("Unsupported random distribution on GPUs: " + dist['dist'])

    # check which equation blocks we need to extend
    if len(loc_pre) > 0:
        loc_eqs = tabify(loc_pre, 2) + "\n" + loc_eqs
    if len(glob_pre) > 0:
        glob_eqs = tabify(glob_pre, 1) + "\n" + glob_eqs

    return loc_eqs, glob_eqs
def _post_event(self, proj):
"""
Post-synaptic event kernel for CUDA devices
"""
if proj.synapse_type.type == "rate":
return "", "", ""
if proj.synapse_type.description['post_spike'] == []:
return "", "", ""
if proj._storage_format == "csr":
ids = {
'id_proj' : proj.id,
'target': proj.target,
'id_post': proj.post.id,
'id_pre': proj.pre.id,
'local_index': "[j]",
'semiglobal_index': '[i]',
'global_index': '[0]',
'pre_index': '[pre_rank[j]]',
'post_index': '[post_rank[i]]',
'pre_prefix': 'pop'+ str(proj.pre.id) + '.',
'post_prefix': 'pop'+ str(proj.post.id) + '.'
}
else:
raise NotImplementedError
add_args_header = ""
add_args_call = ""
# Event-driven integration
has_event_driven = False
for var in proj.synapse_type.description['variables']:
if var['method'] == 'event-driven':
has_event_driven = True
# Generate event-driven code
event_driven_code = ""
event_deps = []
if has_event_driven:
# event-driven rely on last pre-synaptic event
add_args_header += ", long* _last_event"
add_args_call += ", proj%(id_proj)s._gpu_last_event" % {'id_proj': proj.id}
for var in proj.synapse_type.description['variables']:
if var['method'] == 'event-driven':
event_driven_code += '// ' + var['eq'] + '\n'
event_driven_code += var['cpp'] + '\n'
for deps in var['dependencies']:
event_deps.append(deps)
event_driven_code += """
// Update the last event for the synapse
_last_event%(local_index)s = t;
""" % {'local_index' : '[j]'}
# Gather the equations
post_code = ""
post_deps = []
for post_eq in proj.synapse_type.description['post_spike']:
post_code += '// ' + post_eq['eq'] + '\n'
if post_eq['name'] == 'w':
post_code += "if(plasticity)\n"
post_code += post_eq['cpp'] + '\n'
post_code += get_bounds(post_eq) + '\n'
# add dependencies, only right side!
for deps in post_eq['dependencies']:
post_deps.append(deps)
# left side of equations is not part of dependencies
post_deps.append(post_eq['name'])
# Create add_args for event-driven eqs and post_event
kernel_deps = list(set(post_deps+event_deps)) # variables can occur in several eqs
for dep in kernel_deps:
if dep == "w":
continue
attr_type, attr_dict = self._get_attr_and_type(proj, dep)
attr_ids = {
'id': proj.id, 'type': attr_dict['ctype'], 'name': attr_dict['name']
}
if attr_type == 'par' and attr_dict['locality'] == "global":
add_args_header += ', const %(type)s %(name)s' % attr_ids
add_args_call += ', proj%(id)s.%(name)s' % attr_ids
if post_code.strip != '':
post_code = post_code.replace(attr_dict['name']+"%(global_index)s", attr_dict['name'])
if event_driven_code.strip() != '':
event_driven_code = event_driven_code.replace(attr_dict['name']+"%(global_index)s", attr_dict['name'])
else:
add_args_header += ', %(type)s* %(name)s' % attr_ids
add_args_call += ', proj%(id)s.gpu_%(name)s' % attr_ids
if proj._storage_format == "csr":
idx_type, _, size_type, _ = determine_idx_type_for_projection(proj)
conn_ids = {'idx_type': idx_type, 'size_type': size_type}
if proj._storage_order == "post_to_pre":
conn_header = "%(size_type)s* row_ptr, %(idx_type)s* col_idx, " % conn_ids
conn_call = ", proj%(id_proj)s.gpu_row_ptr, proj%(id_proj)s.gpu_pre_rank"
else:
conn_header = "%(size_type)s* col_ptr, %(idx_type)s* row_idx, " % conn_ids
conn_call = ", proj%(id_proj)s.gpu_col_ptr, proj%(id_proj)s.gpu_row_idx"
templates = self._templates['post_event'][proj._storage_order]
else:
raise NotImplementedError
postevent_header = templates['header'] % {
'id_proj': proj.id,
'conn_args': conn_header,
'add_args': add_args_header,
'float_prec': Global.config['precision']
}
postevent_body = templates['body'] % {
'id_proj': proj.id,
'conn_args': conn_header,
'add_args': add_args_header,
'event_driven': tabify(event_driven_code % ids, 2),
'post_code': tabify(post_code % ids, 2),
'float_prec': Global.config['precision']
}
postevent_call = templates['call'] % {
'id_proj': proj.id,
'id_pre': proj.pre.id,
'id_post': proj.post.id,
'target': proj.target[0] if isinstance(proj.target, list) else proj.target,
'conn_args': conn_call % ids,
'add_args': add_args_call
}
return postevent_body, postevent_header, postevent_call
def _init_random_distributions(self, proj):
# Random numbers
code = ""
if len(proj.synapse_type.description['random_distributions']) > 0:
code += """
// Random numbers"""
for dist in proj.synapse_type.description['random_distributions']:
rng_ids = {
'id': proj.id,
'rd_name': dist['name'],
}
code += self._templates['rng'][dist['locality']]['init'] % rng_ids
return code
def _memory_transfers(self, proj):
"""
Generate source code for transfer variables and parameters.
"""
if 'host_device_transfer' in proj._specific_template.keys() and \
'device_host_transfer' in proj._specific_template.keys():
return proj._specific_template['host_device_transfer'], proj._specific_template['device_host_transfer']
host_device_transfer = ""
device_host_transfer = ""
proc_attr = []
for attr in proj.synapse_type.description['parameters']+proj.synapse_type.description['variables']:
# avoid doublons
if attr['name'] in proc_attr:
continue
attr_type = "parameter" if attr in proj.synapse_type.description['parameters'] else "variable"
locality = attr['locality']
if attr_type == "parameter" and locality == "global":
continue
#
# Host -> Device
#
host_device_transfer += self._templates['host_to_device'][locality] % {
'id': proj.id,
'name': attr['name'],
'type': attr['ctype']
}
#
# Device -> Host
#
device_host_transfer += self._templates['device_to_host'][locality] % {
'id': proj.id, 'name': attr['name'], 'type': attr['ctype']
}
proc_attr.append(attr['name'])
return host_device_transfer, device_host_transfer
def _process_equations(self, proj, equations, ids, locality):
    """
    Process the equation block and create equation blocks and
    corresponding kernel argument list and call statement.

    :param proj: the projection being generated.
    :param equations: pre-generated equation code (still containing
                      %(...)s index placeholders).
    :param ids: replacement dictionary applied to the placeholders.
    :param locality: 'global', 'semiglobal' or 'local'.
    :returns: tuple (equations, pre_loop, kernel_args, kernel_args_call).

    .. Note:

    This function is a helper function and should be called by
    _update_synapse() only.
    """
    # Process equations and determine dependencies
    syn_deps, neur_deps = self._select_deps(proj, locality)
    kernel_args, kernel_args_call = self._gen_kernel_args(proj, neur_deps, syn_deps)

    # Add pre_rank/post_rank identifier if needed
    rk_assign = ""
    if locality == "semiglobal":
        rk_assign += "%(idx_type)s rk_post = rank_post%(semiglobal_index)s;\n"

    elif locality == "local":
        # rk_post/rk_pre depend on the matrix format
        if proj._storage_format in ["csr"]:
            rk_assign += "%(idx_type)s rk_pre = rank_pre%(local_index)s;\n"
        elif proj._storage_format in ["dense"]:
            # dense matrices index directly — no rank lookup needed
            pass
        else:
            rk_assign += "%(idx_type)s rk_post = rank_post%(semiglobal_index)s;\n"
            rk_assign += "%(idx_type)s rk_pre = rank_pre%(local_index)s;\n"

    # finalize rank assignment code (indentation depends on the kernel's loop depth)
    rk_assign = tabify(rk_assign, 2 if proj._storage_format == "csr" else 3)

    # Gather pre-loop declaration (dt/tau for ODEs)
    pre_loop = ""
    for var in proj.synapse_type.description['variables']:
        if var['locality'] == locality:
            if 'pre_loop' in var.keys() and len(var['pre_loop']) > 0:
                pre_loop += Global.config['precision'] + ' ' + var['pre_loop']['name'] + ' = ' + var['pre_loop']['value'] + ';\n'
        else:
            continue

    # Global parameters have no index: strip the %(global_index)s placeholder
    # from their occurrences before the ids substitution below.
    for syn_dep in syn_deps:
        attr_type, attr_dict = self._get_attr_and_type(proj, syn_dep)
        if attr_type == "par" and attr_dict['locality'] == "global":
            equations = equations.replace(attr_dict["name"] + "%(global_index)s", attr_dict["name"])

            if pre_loop.strip() != '':
                pre_loop = pre_loop.replace(attr_dict["name"] + "%(global_index)s", attr_dict["name"])

    # Finalize equations, add pre-loop and/or rank assignment
    equations = (rk_assign + equations) % ids

    if pre_loop.strip() != '':
        pre_loop = """\n// Updating the step sizes\n""" + pre_loop % ids

    return equations, pre_loop, kernel_args, kernel_args_call
def _update_synapse(self, proj):
    """
    Generate the device codes for synaptic equations. As the parallel
    evaluation of local and global equations within one kernel would require
    a __syncthread() call, we split up the implementation into separate
    kernels (global / semiglobal / local).

    :param proj: the projection to generate update kernels for.

    Return:

    * a tuple containing three strings ( body, header, call )
    """
    # Global variables
    global_eq = generate_equation_code(proj.id, proj.synapse_type.description, 'global', 'proj', padding=1, wrap_w="plasticity")

    # Semiglobal variables
    semiglobal_eq = generate_equation_code(proj.id, proj.synapse_type.description, 'semiglobal', 'proj', padding=2, wrap_w="plasticity")

    # Local variables
    pad = 2 if proj._storage_format == "csr" else 3
    local_eq = generate_equation_code(proj.id, proj.synapse_type.description, 'local', 'proj', padding=pad, wrap_w="plasticity")

    # Something to do?
    if global_eq.strip() == '' and semiglobal_eq.strip() == '' and local_eq.strip() == '':
        return "", "", ""

    # Modify the default dictionary for specific formats
    # (deepcopy so the shared template id dict is not mutated)
    ids = deepcopy(self._template_ids)
    if proj._storage_format == "ell":
        ids['pre_index'] = '[rk_pre]'
        ids['post_index'] = '[rk_post]'
    elif proj._storage_format == "csr":
        ids['pre_index'] = '[rk_pre]'
        ids['post_index'] = '[rk_post]'

    # CPP type for indices
    idx_type, _, size_type, _ = determine_idx_type_for_projection(proj)

    # some commonly needed ids
    ids.update({
        'id_proj': proj.id,
        'id_pre': proj.pre.id,
        'id_post': proj.post.id,
        'float_prec': Global.config['precision'],
        'idx_type': idx_type,
        'size_type': size_type
    })

    # generate the code
    body = ""
    header = ""
    local_call = ""
    global_call = ""
    semiglobal_call = ""

    #
    # Fill code templates for global, semiglobal and local equations
    #
    if global_eq.strip() != '':
        global_eq, global_pre_code, kernel_args_global, kernel_args_call_global = self._process_equations( proj, global_eq, ids, 'global' )

    if semiglobal_eq.strip() != '':
        semiglobal_eq, semiglobal_pre_code, kernel_args_semiglobal, kernel_args_call_semiglobal = self._process_equations( proj, semiglobal_eq, ids, 'semiglobal' )

    if local_eq.strip() != '':
        local_eq, local_pre_code, kernel_args_local, kernel_args_call_local = self._process_equations( proj, local_eq, ids, 'local' )

    # replace the random distributions
    local_eq, global_eq = self._replace_random(local_eq, ids['local_index'], global_eq, proj.synapse_type.description['random_distributions'])

    #
    # replace local function calls
    if len(proj.synapse_type.description['functions']) > 0:
        global_eq, semiglobal_eq, local_eq = self._replace_local_funcs(proj, global_eq, semiglobal_eq, local_eq)

    # connectivity
    conn_header = self._templates['conn_header'] % ids
    conn_call = self._templates['conn_call'] % ids

    # we separated the execution of global/semiglobal/local into three kernels
    # as the threads would have different loads otherwise.
    if global_eq.strip() != '':
        body_dict = {
            'kernel_args': kernel_args_global,
            'global_eqs': global_eq,
            'pre_loop': global_pre_code,
        }
        body_dict.update(ids)
        body += self._templates['synapse_update']['global']['body'] % body_dict

        header_dict = {
            'kernel_args': kernel_args_global,
        }
        header_dict.update(ids)
        header += self._templates['synapse_update']['global']['header'] % header_dict

        call_dict = {
            'target': proj.target[0] if isinstance(proj.target, list) else proj.target,
            'kernel_args_call': kernel_args_call_global,
        }
        call_dict.update(ids)
        global_call = self._templates['synapse_update']['global']['call'] % call_dict

    if semiglobal_eq.strip() != '':
        body_dict = {
            'kernel_args': kernel_args_semiglobal,
            'semiglobal_eqs': semiglobal_eq,
            'pre_loop': semiglobal_pre_code,
        }
        body_dict.update(ids)
        body += self._templates['synapse_update']['semiglobal']['body'] % body_dict

        header_dict = {
            'kernel_args': kernel_args_semiglobal,
        }
        header_dict.update(ids)
        header += self._templates['synapse_update']['semiglobal']['header'] % header_dict

        call_dict = {
            'target': proj.target[0] if isinstance(proj.target, list) else proj.target,
            'kernel_args_call': kernel_args_call_semiglobal,
        }
        call_dict.update(ids)
        semiglobal_call = self._templates['synapse_update']['semiglobal']['call'] % call_dict

    if local_eq.strip() != '':
        body_dict = {
            'conn_args': conn_header,
            'kernel_args': kernel_args_local,
            'local_eqs': local_eq,
            'pre_loop': tabify(local_pre_code, 1)
        }
        body_dict.update(ids)
        body += self._templates['synapse_update']['local']['body'] % body_dict

        header_dict = {
            'conn_args': conn_header,
            'kernel_args': kernel_args_local
        }
        header_dict.update(ids)
        header += self._templates['synapse_update']['local']['header'] % header_dict

        call_dict = {
            'target': proj.target[0] if isinstance(proj.target, list) else proj.target,
            'conn_args_call': conn_call,
            'kernel_args_call': kernel_args_call_local
        }
        call_dict.update(ids)
        local_call = self._templates['synapse_update']['local']['call'] % call_dict

    # host-side wrapper invoking whichever of the three kernels exist
    call = self._templates['synapse_update']['call'] % {
        'id_proj': proj.id,
        'post': proj.post.id,
        'pre': proj.pre.id,
        'target': proj.target,
        'global_call': global_call,
        'semiglobal_call': semiglobal_call,
        'local_call': local_call,
        'float_prec': Global.config['precision']
    }

    # Profiling
    if self._prof_gen:
        call = self._prof_gen.annotate_update_synapse(proj, call)

    return body, header, call
| vitay/ANNarchy | ANNarchy/generator/Projection/CUDAGenerator.py | Python | gpl-2.0 | 64,614 | [
"NEURON"
] | 954060335c71010452cbfe239530cfe7b3238e2cb683931591ce4a939cd1dcdf |
# ******************************************************
#
# File generated by: neuroConstruct v1.5.1
#
# ******************************************************
import neuron
from neuron import hoc
import nrn  # NOTE(review): imported but not referenced below; presumably kept for its import side effects — confirm
# Launch the simulation by loading the generated hoc entry point into NEURON.
hoc.execute('load_file("TestHDF5.hoc")')
| rgerkin/neuroConstruct | testProjects/TestHDF5/simulations/TestSpikesHDF5Parallel/run_TestHDF5.py | Python | gpl-2.0 | 260 | [
"NEURON"
] | 3c7f5fc73019584564f9bb2d823f4f15ba9d61d4935e9ab9267fc0c406f7b7b6 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Toyota Research Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import platform
import setuptools
import shutil
import subprocess
import sys
from distutils.command.build import build as _build
from distutils.command.sdist import sdist as _sdist
from distutils.errors import LibError
from distutils.util import get_platform
from setuptools.command.bdist_egg import bdist_egg as _bdist_egg
from setuptools.command.develop import develop as _develop
# '4.21.06.2' -> '4.21.6.2': strip zero padding from the date-based version.
# NOTE(review): replace() would also collapse any other ".0" occurrence — confirm intended.
VERSION = '4.21.06.2'.replace(".0", ".")
# Directory containing this setup.py; also used as the bazel workspace root.
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(ROOT_DIR)
def _build_dreal():
    """Build the dReal Python extension (//dreal:_dreal_py.so) with bazel.

    Exports the current interpreter as PYTHON_BIN_PATH / --python_path so
    bazel builds against the same Python that runs this setup script.

    Raises:
        LibError: when the bazel invocation returns a non-zero exit code.
    """
    new_env = os.environ.copy()
    new_env["PYTHON_BIN_PATH"] = sys.executable
    if subprocess.call([
            'bazel', 'build', '//dreal:_dreal_py.so',
            '--cxxopt=-DDREAL_CHECK_INTERRUPT', '--python_path={}'.format(
                sys.executable),
    ],
                       env=new_env) != 0:
        raise LibError("Unable to build dReal.\n" +
                       "Please visit https://pypi.org/project/dreal and " +
                       "follow the instructions to install the prerequisites.")
def _copy_bins():
    """Copy the bazel-built shared libraries into the dreal package directory.

    On macOS the install names are additionally rewritten with
    install_name_tool so the extension finds libdreal_.so relative to its
    own location (@rpath / @loader_path) instead of the build tree.
    """
    shutil.copy(os.path.join(SRC_DIR, 'bazel-bin', 'dreal', '_dreal_py.so'),
                os.path.join(ROOT_DIR, 'dreal'))
    # 436 == 0o664 (rw-rw-r--): make the copied file writable by the owner/group
    os.chmod(os.path.join(ROOT_DIR, 'dreal', '_dreal_py.so'), 436)
    shutil.copy(os.path.join(SRC_DIR, 'bazel-bin', 'libdreal_.so'),
                os.path.join(ROOT_DIR, 'dreal'))
    os.chmod(os.path.join(ROOT_DIR, 'dreal', 'libdreal_.so'), 436)
    if sys.platform == 'darwin':
        dst_full = os.path.join(ROOT_DIR, 'dreal', '_dreal_py.so')
        # Set the extension's own install name to an @rpath-relative one.
        subprocess.check_call(
            ["install_name_tool",
             "-id",
             os.path.join('@rpath', "_dreal_py.so"),
             dst_full])
        # Inspect the linked libraries and retarget the libdreal_ reference.
        file_output = subprocess.check_output(["otool",
                                               "-L",
                                               dst_full]).decode("utf-8")
        for line in file_output.splitlines():
            # keep only file path, remove version information.
            relative_path = line.split(' (')[0].strip()
            # If path is relative, it needs to be replaced by absolute path.
            if "@loader_path" not in relative_path:
                continue
            if "libdreal_.so" in relative_path:
                subprocess.check_call(
                    ["install_name_tool",
                     "-change", relative_path,
                     os.path.join('@loader_path', "libdreal_.so"),
                     dst_full])
class build(_build):
    """Standard build command, preceded by a bazel build and binary copy."""
    def run(self):
        self.execute(_build_dreal, (), msg="Building dReal")
        self.execute(_copy_bins, (), msg="Copying binaries")
        _build.run(self)
class develop(_develop):
    """Editable install command, preceded by a bazel build and binary copy."""
    def run(self):
        self.execute(_build_dreal, (), msg="Building dReal")
        self.execute(_copy_bins, (), msg="Copying binaries")
        _develop.run(self)
class bdist_egg(_bdist_egg):
    """Egg build that first runs the custom 'build' (bazel) step."""
    def run(self):
        self.run_command('build')
        _bdist_egg.run(self)
class sdist(_sdist):
    """Source distribution that first runs the custom 'build' (bazel) step."""
    def run(self):
        self.run_command('build')
        _sdist.run(self)
long_description = """dReal4: SMT Solver for Nonlinear Theories of Reals
Please visit https://github.com/dreal/dreal4.
Precompiled Wheels
------------------
We provide precompiled distributions (`.whl`) for the following environments:
- macOS 11.0 / 10.15 + CPython 3
- Linux + CPython 3.7 / 3.8 / 3.9 / 3.10
You still need to install dReal prerequisites such as IBEX and CLP in
your system. To install them, please follow the instructions below:
macOS 11.0 / 10.15
brew tap robotlocomotion/director
brew tap dreal/dreal
brew install dreal --only-dependencies
Ubuntu 20.04 / 18.04
curl -fsSL https://raw.githubusercontent.com/dreal/dreal4/master/setup/ubuntu/`lsb_release -r -s`/install.sh | sudo bash
Build from Source
-----------------
If `pip` fails to find a precompiled distribution, it fetches dReal
source and build it from scratch. You need to install the required
packages to do so. To install them, please follow the instructions
below:
macOS 11.0 / 10.15
brew tap robotlocomotion/director
brew tap dreal/dreal
brew install dreal --only-dependencies --build-from-source
Ubuntu 20.04 / 18.04
curl -fsSL https://raw.githubusercontent.com/dreal/dreal4/master/setup/ubuntu/`lsb_release -r -s`/install_prereqs.sh | sudo bash
"""
# The wheel ships a compiled extension, so it is platform- and
# Python-version-specific. When no explicit --plat-name was given on the
# command line, inject one derived from the current machine.
if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:
    idx = sys.argv.index('bdist_wheel') + 1
    sys.argv.insert(idx, '--plat-name')
    name = get_platform()
    if 'linux' in name:
        # linux_* platform tags are disallowed because the python
        # ecosystem is fubar linux builds should be built in the
        # centos 5 vm for maximum compatibility see
        # https://github.com/pypa/manylinux see also
        # https://github.com/angr/angr-dev/blob/master/admin/bdist.py
        sys.argv.insert(idx + 1, 'manylinux1_' + platform.machine())
    elif 'mingw' in name:
        if platform.architecture()[0] == '64bit':
            sys.argv.insert(idx + 1, 'win_amd64')
        else:
            sys.argv.insert(idx + 1, 'win32')
    else:
        # https://www.python.org/dev/peps/pep-0425/
        sys.argv.insert(idx + 1, name.replace('.', '_').replace('-', '_'))
    # Make a wheel which is specific to the minor version of Python
    # For example, "cp35".
    #
    # Note: We assume that it's using cpython.
    if not any(arg.startswith('--python-tag') for arg in sys.argv):
        python_tag = "cp%d%d" % (sys.version_info.major,
                                 sys.version_info.minor)
        sys.argv.extend(['--python-tag', python_tag])
# Package metadata and the custom command classes wired in above.
setuptools.setup(
    name='dreal',  # Required
    version=VERSION,  # Required
    description='SMT Solver for Nonlinear Theories of Reals',  # Optional
    long_description=long_description,  # Optional
    long_description_content_type='text/markdown',  # Optional (see note above)
    url='https://github.com/dreal/dreal4',  # Optional
    author='Soonho Kong',  # Optional
    author_email='soonho.kong@gmail.com',  # Optional
    classifiers=[  # Optional
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS',
    ],
    keywords=['dreal', 'smt', 'theorem', 'prover'],  # Optional
    packages=['dreal'],
    include_package_data=True,
    # Ship the prebuilt shared libraries copied in by _copy_bins().
    package_data={  # Optional
        'dreal': ['_dreal_py.so', 'libdreal_.so'],
    },
    project_urls={  # Optional
        'Bug Reports': 'https://github.com/dreal/dreal4/issues',
        'Source': 'https://github.com/dreal/dreal4',
    },
    cmdclass={'build': build,
              'develop': develop,
              'sdist': sdist,
              'bdist_egg': bdist_egg},
)
| dreal/dreal4 | setup.py | Python | apache-2.0 | 7,657 | [
"VisIt"
] | 67878c63763e99f5fbbc491967a45f3d667ee9fcaa9635bafd7f2b55d1e7a9d6 |
#!/usr/bin/python
# Quarantine "orphan" source files of one ABINIT src/ subdirectory:
# reads the orphan list from a CSV file, moves the pending files into
# src/quarantine/ with `bzr mv`, and prints follow-up instructions.
from __future__ import print_function

import csv
import os
import sys

# Get directory to check as argument; exit silently when none is given.
if ( len(sys.argv) > 1 ):
    rf_dir = sys.argv[1]
    if ( not os.path.isdir(os.path.join("src", rf_dir)) ):
        sys.stderr.write("Error: %s is not a directory\n" % rf_dir)
        sys.exit(1)
else:
    sys.exit(0)

# Init
my_cmds = ""      # human-readable instructions accumulated for the final print
my_orphans = []   # source files to quarantine

# Find source files to quarantine.
# CSV columns: directory, source file, subroutine, type, action, status.
# NOTE(review): opening in "rb" and feeding csv.reader is the Python 2 idiom;
# under Python 3 csv needs a text-mode file (newline='') -- this script is
# Python-2-only as written.
with open("../abinit-orphans-7.9.1.csv", "rb") as csvfile:
    for row in csv.reader(csvfile):
        my_dir, my_src, my_sub, my_typ, my_act, my_sta = row
        if ( my_dir == rf_dir ):
            if ( (my_typ == "orphan") and \
                 (my_act == "quarantine") and \
                 (my_sta == "pending") ):
                my_orphans.append(my_src)
                my_cmds += "Remember to remove %s from the src/%s/abinit.src file\n" % \
                    (my_src, my_dir)

# Quarantine files (dedupe first: a file may appear once per subroutine).
# NOTE(review): file names from the CSV are interpolated into a shell command;
# subprocess with an argument list would be safer if names can contain
# shell metacharacters.
my_orphans = list(set(my_orphans))
for my_src in my_orphans:
    os.system("bzr mv src/%s/%s src/quarantine/%s_%s" % \
        (rf_dir, my_src, rf_dir, my_src))

# Display instructions
my_cmds += "Remember to mark quarantined routines as done\n"
my_cmds += "bzr commit -m \"Quarantined orphan routines from %s\"\n" % rf_dir
print(my_cmds)
| jmbeuken/abinit | developers/maintainers/process-orphans.py | Python | gpl-3.0 | 1,195 | [
"ABINIT"
] | 11fefa2ec784e9dcd39e3f23390fb7f32f97ef61c7714351dbeb6d78dc5394d4 |
"""
This is the boilerplate default configuration file.
Changes and additions to settings should be done in the config module
located in the application root rather than this config.
"""
# Application-wide settings consumed by the webapp2 boilerplate.
# NOTE(review): session secret_key, aes_key and salt are committed in source
# control here -- confirm they are rotated per deployment and not reused from
# this file in production.
config = {
    # webapp2 sessions
    'webapp2_extras.sessions': {'secret_key': '3jk4l1j3k4l13jkl34j3kl14'},
    # webapp2 authentication
    'webapp2_extras.auth': {'user_model': 'boilerplate.models.User',
                            'cookie_name': 'session_name'},
    # jinja2 templates
    'webapp2_extras.jinja2': {'template_path': ['templates', 'boilerplate/templates', 'admin/templates'],
                              'environment_args': {'extensions': ['jinja2.ext.i18n']}},
    # application name
    'app_name': "Tripstory",
    # the default language code for the application.
    # should match whatever language the site uses when i18n is disabled
    'app_lang': 'en',
    # Locale code = <language>_<territory> (ie 'en_US')
    # to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
    # also see http://www.sil.org/iso639-3/codes.asp
    # Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
    # Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
    # disable i18n if locales array is empty or None
    'locales': ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ','vi_VN'],
    # contact page email settings
    'contact_sender': "mart.kapfhammer@gmail.com",
    'contact_recipient': "mart.kapfhammer@gmail.com",
    # Password AES Encryption Parameters
    # aes_key must be only 16 (*AES-128*), 24 (*AES-192*), or 32 (*AES-256*) bytes (characters) long.
    'aes_key': "1jk4567890112345",
    'salt': "34134890z45jljl45",
    # get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
    # callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
    'twitter_consumer_key': 'PUT_YOUR_TWITTER_CONSUMER_KEY_HERE',
    'twitter_consumer_secret': 'PUT_YOUR_TWITTER_CONSUMER_SECRET_HERE',
    # Facebook Login
    # get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
    # Very Important: set the site_url= your domain in the application settings in the facebook app settings page
    # callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
    'fb_api_key': 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
    'fb_secret': 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
    # Linkedin Login
    # Get you own api key and secret from https://www.linkedin.com/secure/developer
    'linkedin_api': 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
    'linkedin_secret': 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
    # Github login
    # Register apps here: https://github.com/settings/applications/new
    'github_server': 'github.com',
    'github_redirect_uri': 'http://www.example.com/social_login/github/complete',
    'github_client_id': 'PUT_YOUR_GITHUB_CLIENT_ID_HERE',
    'github_client_secret': 'PUT_YOUR_GITHUB_CLIENT_SECRET_HERE',
    # get your own recaptcha keys by registering at http://www.google.com/recaptcha/
    'captcha_public_key': "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE",
    'captcha_private_key': "PUT_YOUR_RECAPCHA_PRIVATE_KEY_HERE",
    # Leave blank "google_analytics_domain" if you only want Analytics code
    'google_analytics_domain': "YOUR_PRIMARY_DOMAIN (e.g. google.com)",
    'google_analytics_code': "UA-XXXXX-X",
    # add status codes and templates used to catch and display errors
    # if a status code is not listed here it will use the default app engine
    # stacktrace error page or browser error page
    'error_templates': {
        403: 'errors/default_error.html',
        404: 'errors/default_error.html',
        500: 'errors/default_error.html',
    },
    # Enable Federated login (OpenID and OAuth)
    # Google App Engine Settings must be set to Authentication Options: Federated Login
    'enable_federated_login': True,
    # jinja2 base layout template
    'base_layout': 'base.html',
    # send error emails to developers
    'send_mail_developer': False,
    # fellas' list
    'developers': (
        ('Santa Klauss', 'snowypal@northpole.com'),
    ),
    # If true, it will write in datastore a log of every email sent
    'log_email': True,
    # If true, it will write in datastore a log of every visit
    'log_visit': True,
    # ----> ADD MORE CONFIGURATION OPTIONS HERE <----
}  # end config
| markap/TravelMap | config/localhost.py | Python | lgpl-3.0 | 4,512 | [
"VisIt"
] | c78681ce99969195e68fd8b772cd0f5d75c2c093d54bb3ae5385de4e84fb4816 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDirichletmultinomial(RPackage):
    """Dirichlet-multinomial mixture models can be used to describe
    variability in microbial metagenomic data.
    This package is an interface to code originally made available by
    Holmes, Harris, and Quince, 2012, PLoS ONE 7(2): 1-15, as discussed
    further in the man page for this package, ?DirichletMultinomial."""

    # Bioconductor landing page and the git repository the sources come from.
    homepage = "https://bioconductor.org/packages/DirichletMultinomial/"
    url = "https://git.bioconductor.org/packages/DirichletMultinomial"

    # Bioconductor releases are fetched by commit, not tarball checksum.
    version('1.20.0', git='https://git.bioconductor.org/packages/DirichletMultinomial', commit='251529f301da1482551142240aeb6baf8dab2272')

    # R package dependencies, needed at both build and run time.
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    # GNU Scientific Library: native numerics the package links against.
    depends_on('gsl')
    # Minimum R version required by this Bioconductor release.
    depends_on('r@3.4.0:')
| EmreAtes/spack | var/spack/repos/builtin/packages/r-dirichletmultinomial/package.py | Python | lgpl-2.1 | 2,070 | [
"Bioconductor"
] | 7e6fd93292d7000b355837a5dd75a5a36025975192bf3576652a1d4810cd1f09 |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 1 17:57:42 2016
"""
from __future__ import print_function, division
import math
import numpy as np
from datetime import datetime
from scipy.stats import norm
from numpy.linalg import cholesky, inv, slogdet
from warnings import warn
from .EPABC import create_Halton_sequence
def run_PWABC(data, simfun, distfun, prior_mean, prior_cov, epsilon,
minacc=300, samplemax=1000000, samplestep=5000,
veps=1.0, doQMC=True, verbose=1):
"""
runs PW-ABC - piecewise approximate Bayesian computation, [1]_
(likelihood-free, probabilistic inference based on a Gaussian factorisation
of the posterior)
References
----------
.. [1] White, S. R.; Kypraios, T. & Preston, S. P. Piecewise Approximate
Bayesian Computation: fast inference for discretely observed Markov
models using a factorised posterior distribution.
Statistics and Computing, 2013, 25, 289-301,
https://doi.org/10.1007/s11222-013-9432-2
"""
# record start time
starttime = datetime.now()
# number of sites (data points) x dimensionality of data points
N, D = data.shape
# number of parameters
P = prior_mean.shape[0]
if np.isscalar(veps):
veps = np.full(N, veps)
# initialise diagnostic variables:
# number of accepted samples
nacc = np.zeros(N)
# total number of samples
ntotal = np.zeros(N)
# get upper triangular form of prior covariance matrix
prior_cov_R = cholesky(prior_cov).T
# initialise Halton sequence, if desired
if doQMC:
if verbose:
print('Creating Gaussian Halton sequence ... ', end='', flush=True)
# get original Halton sequence in unit-cuboid (uniform in [0,1])
# we will reuse the same Halton sequence in each sampling step
Halton_seq = create_Halton_sequence(samplestep, P)
# transform to samples from standard normal
Halton_seq = norm.ppf(Halton_seq)
if verbose:
print('done.')
# use Halton sequence to get quasi Monte Carlo samples from the prior
Halton_samples = np.dot(Halton_seq, prior_cov_R) + prior_mean
else:
Halton_samples = None
# create sample store (parameter samples can be reused across data points)
parsamples = np.full((samplemax, N), np.nan)
# number of already stored batches of parameter samples (in sample steps)
NS = 0
# mean and covariance of posterior data factors
means = np.full((N, P), np.nan)
covs = np.full((N, P, P), np.nan)
precs = np.full((N, P, P), np.nan)
# intermediate sum of estimated precisions
Binv = np.zeros((P, P))
# intermediate sum of precision-scaled means
atmp = np.zeros(P)
# loop over data points
for dind in range(N):
# allocate memory to store accepted simulations, you only need
# minacc + samplestep elements, because the loop breaks once
# minacc is reached
samples = np.full((minacc + samplestep, P), np.nan)
# loop over simulations
for s in range(math.ceil(samplemax / samplestep)):
# determine how many samples you need to get
S = np.min([samplemax - s * samplestep, samplestep])
if doQMC:
# just use samples from Halton sequence
pars = Halton_samples[:S, :]
elif s < NS:
# reuse parameter samples from previous data points
pars = parsamples[s * samplestep : s * samplestep + S]
else:
# get fresh samples from prior
pars = np.random.normal(size=(S, P))
pars = np.dot(pars, prior_cov_R) + prior_mean
# store them for later reuse
parsamples[s * samplestep : s * samplestep + S] = pars
# simulate from model with sampled parameters
sims = simfun(pars, dind)
# get distances between simulated and real data
dists = distfun(data[dind, :], sims)
# find accepted samples
accind = dists < epsilon
naccnew = nacc[dind] + np.sum(accind)
if nacc[dind] < naccnew:
samples[nacc[dind]:naccnew, :] = pars[accind, :]
nacc[dind] = naccnew
# break if enough accepted samples
if nacc[dind] >= minacc:
break
samples = samples[:nacc[dind], :]
ntotal[dind] = np.min([samplemax, (s+1) * samplestep])
if nacc[dind] < P:
warn('Skipping site %d, ' % dind +
'because the number of accepted samples was ' +
'smaller than the number of parameters.')
else:
# get mean and covariance of accepted samples
means[dind, :] = np.mean(samples, axis=0)
covs[dind, :, :] = np.cov(samples, rowvar=0)
if nacc[dind] < minacc:
warn('The minimum number of accepted samples was not ' +
'reached for site %d (%d accepted). ' % (dind, nacc[dind]) +
'Continuing anyway, but checking for positive ' +
'definiteness of estimated covariance. Error ' +
'may follow.', RuntimeWarning)
# raises LinAlgError if cov_new is not positive definite
cholesky(covs[dind, :, :])
if dind >= 1:
precs[dind, :, :] = inv(covs[dind, :, :])
Binv += precs[dind, :, :]
atmp += np.dot(precs[dind, :, :], means[dind, :])
# print status information
if verbose and ( math.floor(dind / N * 100) <
math.floor((dind+1) / N * 100) ):
print('\r%3d%% completed' % math.floor((dind+1) / N * 100), end='');
if verbose:
# finish line by printing \n
print('')
# compute posterior covariance (Eq. 20 in White et al.)
prior_prec = inv(prior_cov)
post_cov = inv((2-N) * prior_prec + Binv)
# compute posterior mean (Eq. 21 in White et al.)
post_mean = np.dot(post_cov, (2-N) * np.dot(prior_prec, prior_mean) + atmp)
# compute log marginal likelihood
B = inv(Binv)
a = np.dot(B, atmp)
logml = np.sum(np.log(nacc[1:] / ntotal[1:])) - np.sum(np.log(veps))
for dind in range(1, N):
sd, logd = slogdet(covs[dind, :, :])
logml -= 0.5 * logd
for dind2 in range(dind+1, N):
mdiff = means[dind, :] - means[dind2, :]
logml -= 0.5 * np.dot(np.dot(np.dot(np.dot(mdiff,
precs[dind, :, :]), B), precs[dind2, :, :]), mdiff)
sd, logd = slogdet(post_cov)
logml += 0.5 * logd
sd, logd = slogdet(prior_cov)
logml += (N - 2) / 2 * logd
mdiff = a - prior_mean
logml -= 0.5 * np.dot(np.dot(mdiff, inv(prior_cov / (2 - N) + B)), mdiff)
runtime = datetime.now() - starttime
if verbose:
print('elapsed time: ' + runtime.__str__())
return post_mean, post_cov, logml, nacc, ntotal, runtime | sbitzer/pyEPABC | pyEPABC/PWABC.py | Python | bsd-3-clause | 7,076 | [
"Gaussian"
] | 7c5f1da4fee8c921cf7f7492b4b8eed82703d75db08f9abbe8c5cf79d03b1b07 |
# $HeadURL: $
''' GGUSTicketsCommand
The GGUSTickets_Command class is a command class to know about
the number of active present opened tickets.
'''
import urllib2
from DIRAC import gLogger, S_ERROR, S_OK
from DIRAC.Core.LCG.GGUSTicketsClient import GGUSTicketsClient
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getGOCSiteName
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
class GGUSTicketsCommand( Command ):
'''
GGUSTickets "master" Command
'''
def __init__( self, args = None, clients = None ):
super( GGUSTicketsCommand, self ).__init__( args, clients )
if 'GGUSTicketsClient' in self.apis:
self.gClient = self.apis[ 'GGUSTicketsClient' ]
else:
self.gClient = GGUSTicketsClient()
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis[ 'ResourceManagementClient' ]
else:
self.rmClient = ResourceManagementClient()
def _storeCommand( self, result ):
'''
Stores the results of doNew method on the database.
'''
for ggus in result:
resQuery = self.rmClient.addOrModifyGGUSTicketsCache( ggus[ 'GocSite' ],
ggus[ 'Link' ],
ggus[ 'OpenTickets' ],
ggus[ 'Tickets' ] )
if not resQuery[ 'OK' ]:
return resQuery
return S_OK()
def _prepareCommand( self ):
'''
GGUSTicketsCommand requires one arguments:
- elementName : <str>
GGUSTickets are associated with gocDB names, so we have to transform the
diracSiteName into a gocSiteName.
'''
if not 'name' in self.args:
return S_ERROR( '"name" not found in self.args' )
name = self.args[ 'name' ]
return getGOCSiteName( name )
def doNew( self, masterParams = None ):
'''
Gets the parameters to run, either from the master method or from its
own arguments.
For every elementName ( cannot process bulk queries.. ) contacts the
ggus client. The server is not very stable, so we protect against crashes.
If there are ggus tickets, are recorded and then returned.
'''
if masterParams is not None:
gocName = masterParams
gocNames = [ gocName ]
else:
gocName = self._prepareCommand()
if not gocName[ 'OK' ]:
return gocName
gocName = gocName[ 'Value' ]
gocNames = [ gocName ]
try:
results = self.gClient.getTicketsList( gocName )
except urllib2.URLError, e:
return S_ERROR( '%s %s' % ( gocName, e ) )
if not results[ 'OK' ]:
return results
results = results[ 'Value' ]
uniformResult = []
for gocSite, ggusResult in results.items():
if not gocSite in gocNames:
continue
ggusDict = {}
ggusDict[ 'GocSite' ] = gocSite
ggusDict[ 'Link' ] = ggusResult[ 'URL' ]
del ggusResult[ 'URL' ]
openTickets = 0
for priorityDict in ggusResult.values():
openTickets += len( priorityDict )
ggusDict[ 'Tickets' ] = ggusResult
ggusDict[ 'OpenTickets' ] = openTickets
uniformResult.append( ggusDict )
storeRes = self._storeCommand( uniformResult )
if not storeRes[ 'OK' ]:
return storeRes
return S_OK( uniformResult )
def doCache( self ):
'''
Method that reads the cache table and tries to read from it. It will
return a list of dictionaries if there are results.
'''
gocName = self._prepareCommand()
if not gocName[ 'OK' ]:
return gocName
gocName = gocName[ 'Value' ]
result = self.rmClient.selectGGUSTicketsCache( gocSite = gocName )
if result[ 'OK' ]:
result = S_OK( [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ] )
return result
def doMaster( self ):
'''
Master method, which looks little bit spaguetti code, sorry !
- It gets all gocSites.
As there is no bulk query, it compares with what we have on the database.
It queries a portion of them.
'''
gocSites = CSHelpers.getGOCSites()
if not gocSites[ 'OK' ]:
return gocSites
gocSites = gocSites[ 'Value' ]
# resQuery = self.rmClient.selectGGUSTicketsCache( meta = { 'columns' : [ 'GocSite' ] } )
# if not resQuery[ 'OK' ]:
# return resQuery
# resQuery = [ element[0] for element in resQuery[ 'Value' ] ]
#
# gocNamesToQuery = set( gocSites ).difference( set( resQuery ) )
gLogger.info( 'Processing %s' % ', '.join( gocSites ) )
for gocNameToQuery in gocSites:
# if gocNameToQuery is None:
# self.metrics[ 'failed' ].append( 'None result' )
# continue
result = self.doNew( gocNameToQuery )
if not result[ 'OK' ]:
self.metrics[ 'failed' ].append( result )
return S_OK( self.metrics )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF | marcelovilaca/DIRAC | ResourceStatusSystem/Command/GGUSTicketsCommand.py | Python | gpl-3.0 | 5,609 | [
"DIRAC"
] | 477110ade4b8347d1db990ca60d89fc6652064d398a6055669d4af5284d758ea |
#!/usr/bin/env python3
# -*-coding:Utf-8 -*
"""Use MCMC to find the stellar mass halo mass relation.
Based on the Behroozi et al 2010 paper.
Use a parametrization of the SHMR, plus a given HMF to find the expected SMF and compare it
to the observed SMF with its uncertainties using a likelihod maximisation.
Started on december 18th by Louis Legrand at IAP and IAS.
"""
import numpy as np
# import matplotlib.pyplot as plt
# import emcee
from astropy.cosmology import LambdaCDM
import scipy.optimize as op
from scipy import signal
import os
cwd = os.getcwd()
#################
### Load data ###
#################
"""Load HMF"""
# redshifts of the BolshoiPlanck files
redshift_haloes = np.arange(0, 10, step=0.1)
numredshift_haloes = np.size(redshift_haloes)
"""Definition of hmf_bolshoi columns :
hmf_bolshoi[redshift][:,0] = Log10(mass) [Msun]
hmf_bolshoi[redshift][:,1] = Log10(cen_mf), ie central haloes mass function
(density) [1/Mpc^3]
hmf_bolshoi[redshift][:,2] = Log10(all_macc_mf), ie all haloes mass function
(density) [1/Mpc^3]
"""
hmf_bolshoi = []
for i in range(numredshift_haloes):
hmf_bolshoi.append(
np.loadtxt('../Data/HMFBolshoiPlanck/mf_planck/mf_planck_z' +
'{:4.3f}'.format(redshift_haloes[i]) + '_mvir.dat'))
"""Load the SMF from Iary Davidzon+17"""
# redshifts of the Iari SMF
redshifts = np.array([0.2, 0.5, 0.8, 1.1, 1.5, 2, 2.5, 3, 3.5, 4.5, 5.5])
numzbin = np.size(redshifts) - 1
smf_cosmos = []
for i in range(10):
# Select the SMFs to use : tot, pas or act; D17 or SchechterFixedMs
smf_cosmos.append(
np.loadtxt('../Data/Davidzon/Davidzon+17_SMF_v3.0/mf_mass2b_fl5b_tot_VmaxFit2D' + str(i) + '.dat'))
# '../Data/Davidzon/schechter_fixedMs/mf_mass2b_fl5b_tot_VmaxFit2E'
# + str(i) + '.dat')
"""Adapt SMF to match the Bolshoi-Planck Cosmology"""
# Bolshoi-Planck cosmo : (flat LCMD)
# Om = 0.3089, Ol = 0.6911, Ob = 0.0486, h = 0.6774, s8 = 0.8159, ns = 0.9667
BP_Cosmo = LambdaCDM(H0=67.74, Om0=0.3089, Ode0=0.6911)
# Davidzon+17 SMF cosmo : (flat LCDM)
# Om = 0.3, Ol = 0.7, h=0.7
D17_Cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
for i in range(10):
# Correction of the comoving Volume :
VmaxD17 = D17_Cosmo.comoving_volume(redshifts[i+1]) - D17_Cosmo.comoving_volume(redshifts[i])
VmaxBP = BP_Cosmo.comoving_volume(redshifts[i+1]) - BP_Cosmo.comoving_volume(redshifts[i])
# VmaxD17 = get_Vmax_mod.main(redshifts[i], redshifts[i+1], cosmo=[70, 0.3, 0.7])
# VmaxBP = get_Vmax_mod.main(redshifts[i], redshifts[i+1], cosmo=[67.74, 0.3089, 0.6911])
# Add the log, equivalent to multiply by VmaxD17/VmaxBP
smf_cosmos[i][:, 1] = smf_cosmos[i][:, 1] + np.log10(VmaxD17/VmaxBP)
smf_cosmos[i][:, 2] = smf_cosmos[i][:, 2] + np.log10(VmaxD17/VmaxBP)
smf_cosmos[i][:, 3] = smf_cosmos[i][:, 3] + np.log10(VmaxD17/VmaxBP)
# Correction of the measured stellar mass
# Equivalent to multiply by (BP_Cosmo.H0/D17_Cosmo.H0)**-2
smf_cosmos[i][:, 0] = smf_cosmos[i][:, 0] - 2 * np.log10(BP_Cosmo.H0/D17_Cosmo.H0)
#######################################
### Define functions and parameters ###
#######################################
def logMh(logMs, M1, Ms0, beta, delta, gamma):
    # SM-HM relation: log10 of the halo mass for a given (log) stellar mass.
    # NOTE(review): `gamma` is accepted but never used in the expression, and
    # Behroozi et al. 2010 (eq. 21) has (Ms/Ms0)**(-gamma) in the denominator
    # of the third term -- confirm whether this deviation is intentional.
    # NOTE(review): the ratio logMs/Ms0 divides a log-mass by Ms0 (~11 at the
    # call sites), i.e. a ratio of logarithms rather than of linear masses;
    # verify the intended parametrization against the calling code.
    return np.log10(M1) + beta*np.log10(logMs/Ms0) + ((logMs/Ms0)**delta)/(1 + (logMs/Ms0)**delta) - 0.5
def phi_direct(logMs1, logMs2, idx_z, M1, Ms0, beta, delta, gamma):
    """Stellar mass function implied by the SM-HM relation and the HMF.

    Maps the two stellar masses to halo masses, looks up the halo mass
    function (all-haloes column) at the grid point nearest the first halo
    mass, and rescales it by the local slope d(log Mh)/d(log Ms).
    """
    shmr_args = (M1, Ms0, beta, delta, gamma)
    halo_mass_1 = logMh(logMs1, *shmr_args)
    halo_mass_2 = logMh(logMs2, *shmr_args)
    hmf = hmf_bolshoi[idx_z]
    nearest_row = np.argmin(np.abs(hmf[:, 0] - halo_mass_1))
    slope = (halo_mass_1 - halo_mass_2) / (logMs1 - logMs2)
    return 10 ** hmf[nearest_row, 2] * slope
Mmin = 7
Mmax = 16
numpoints = 1000
y = np.linspace(Mmin, Mmax, num=numpoints)
def lognorm(y, logMs, ksi):
    """Gaussian scatter kernel in log-mass.

    Probability density, at log-mass ``y``, of a normal distribution centred
    on ``logMs`` with standard deviation ``ksi`` (dex).  This implements the
    log-normal scatter of stellar mass at fixed halo mass used by phi_true().

    Bug fix: the exponent was ``(y - logMs) / (2 * ksi**2)`` -- missing both
    the minus sign and the square -- so the "density" grew exponentially with
    ``y`` instead of being a normalised bell curve.
    """
    return 1 / np.sqrt(2 * np.pi * ksi**2) * np.exp(-(y - logMs)**2 / (2 * ksi**2))
def phi_true(idx_z, logMs, M1, Ms0, beta, delta, gamma, ksi):
    """SMF convolved with a log-normal scatter in stellar mass at fixed halo
    mass: accumulate phi_direct over consecutive points of the module-level
    log-mass grid ``y``, each weighted by the scatter kernel centred on
    ``logMs``.
    """
    total = 0
    for mass_lo, mass_hi in zip(y[:-1], y[1:]):
        weight = lognorm(mass_lo, logMs, ksi)
        total += phi_direct(mass_lo, mass_hi, idx_z, M1, Ms0, beta, delta, gamma) * weight
    return total
# def phi_true(idx_z, logMs, M1, Ms0, beta, delta, gamma, ksi):
# y = np.linspace(Mmin, Mmax, num=numpoints)
# phidirect = phi_direct(y[1:], y[:-1], idx_z, M1, Ms0, beta, delta, gamma)
# lognorm = signal.gaussian(50, std=ksi)
# return np.covolve()
# def phi_expect(z1, z2, logMs, M1, Ms0, beta, delta, gamma, ksi):
# # Take into account that the observed SMF is for a range of redshift
# numpoints = 10
# redshifts = np.linspace(z1, z2, num=numpoints)
# top = 0
# bot = 0
# for i in range(numpoints - 1):
# dVc = BP_Cosmo.comoving_volume(redshifts[i+1]) - BP_Cosmo.comoving_volume(redshifts[i])
# top += phi_true(redshifts[i], logMs, M1, Ms0, beta, delta, gamma, ksi) * dVc
# bot += dVc
# return top/bot
def chi2(idx_z, M1, Ms0, beta, delta, gamma, ksi):
    """Return the chi**2 between the observed and the model SMF at redshift
    bin ``idx_z``.

    Residuals are log10(model/observed) divided by the symmetrised
    observational uncertainty (mean of the upper/lower error columns).

    Bug fix: the original looped with a single index ``i`` that ran over the
    mask-*filtered* mass array but was also used to index the *unfiltered*
    smf_cosmos rows, misaligning observed values with their masses whenever
    an undefined row (log phi <= -1000) precedes a defined one.  All columns
    are now selected through the same mask.
    """
    smf = smf_cosmos[idx_z]
    defined = smf[:, 1] > -1000      # rows where the observed SMF is defined
    logMs = smf[defined, 0]          # stellar masses of the defined rows
    logphi_obs = smf[defined, 1]     # log10 of the observed SMF
    # symmetrised uncertainty: mean of upper (col 2) and lower (col 3) errors
    sigma = (smf[defined, 2] + smf[defined, 3]) / 2
    total = 0
    for i in range(len(logMs)):
        model = phi_true(idx_z, logMs[i], M1, Ms0, beta, delta, gamma, ksi)
        total += (np.log10(model / 10**logphi_obs[i]) / sigma[i])**2
    return total
def negloglike(theta, idx_z):
    """Negative log-likelihood of parameter vector ``theta`` at redshift bin
    ``idx_z``: half the chi-squared, assuming Gaussian errors.

    ``theta`` unpacks as (M1, Ms0, beta, delta, gamma, ksi).
    """
    M1, Ms0, beta, delta, gamma, ksi = theta
    return 0.5 * chi2(idx_z, M1, Ms0, beta, delta, gamma, ksi)
##########################################
### Find maximum likelihood estimation ###
##########################################
# --- Maximum-likelihood driver ------------------------------------------
# Redshift bin to fit.
idx_z = 0
# Initial guess for (M1, Ms0, beta, delta, gamma, ksi) and their bounds.
theta0 = np.array([12, 11, 0.5, 0.5, 2.5, 0.15])
bounds = ((10, 14), (8, 13), (0, 2), (0, 3), (0, 5), (0, 1))

# results = op.minimize(negloglike, theta0, args=(idx_z), options={'maxiter':100})
# Sanity check: evaluate the objective once at the initial guess.
print(negloglike(theta0, idx_z))
"Gaussian"
] | 09e5a04bb6048f7ba7c385fdd06d0a77efc3f7d31b8e7152b9a8f9c64008d9e8 |
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
from passlib.context import CryptContext
import warnings
pwd_context = CryptContext(
schemes=[
"ldap_salted_sha1",
"ldap_md5",
"ldap_sha1",
"ldap_salted_md5",
"ldap_des_crypt",
"ldap_md5_crypt",
],
default="ldap_salted_sha1",
)
def check_password(password, encrypted):
    """Verify ``password`` against an LDAP-style ``encrypted`` hash.

    Legacy entries may carry a lower-case "{crypt}" scheme prefix; passlib
    only recognises the upper-case form, so normalise it before verifying.
    """
    legacy_prefix = "{crypt}"
    if encrypted.startswith(legacy_prefix):
        encrypted = "{CRYPT}" + encrypted[len(legacy_prefix):]
    return pwd_context.verify(password, encrypted)
def encode_password(password):
    # Hash `password` with the context's default scheme (ldap_salted_sha1).
    # NOTE(review): passlib renamed CryptContext.encrypt() to hash();
    # encrypt() still works but is deprecated in recent passlib releases.
    return pwd_context.encrypt(password)
class UserPassword(object):
    # Deprecated compatibility shim around the module-level helpers;
    # use check_password() / encode_password() directly instead.

    def __init__(self):
        # NOTE(review): the message misspells "deprecated"; it is a runtime
        # string, so it is left untouched here.
        warnings.warn(
            "ldap_passwd class depreciated; do not use", DeprecationWarning)

    @staticmethod
    def _compareSinglePassword(password, encrypted):
        # Delegates to the module-level check_password().
        return check_password(password, encrypted)

    @staticmethod
    def encodePassword(password, algorithm):
        # `algorithm` is accepted for API compatibility but ignored; the
        # CryptContext default scheme is always used.
        return encode_password(password)
| brianmay/python-tldap-debian | tldap/ldap_passwd.py | Python | gpl-3.0 | 1,733 | [
"Brian"
] | c0acae8882b430033d08ee846fe5b13f4fb4542337ecd35fc30bc4c6a77c53e8 |
# Copyright notice
# ================
#
# Copyright (C) 2010
# Lorenzo Martignoni <martignlo@gmail.com>
# Roberto Paleari <roberto.paleari@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import visitor
from syscall import *
class VisitorHTML(visitor.Visitor):
    """Outputs the syscall trace in HTML format."""

    # NOTE(review): trace values (names, IDs, argument contents) are
    # interpolated into the HTML without escaping; if a trace can contain
    # markup characters this yields broken or script-injecting pages.

    def accept_SyscallTrace(self, trace, *params, **extraparams):
        # Emit the page skeleton (title, expand/collapse JavaScript, table
        # header), one table row per syscall, then the closing tags.
        r = \
"""
<html>
<head>
<title>Syscall trace for %s</title>
<script type="text/javascript">
function toggleArg(id) {
var tr = document.getElementById(id);
if (tr == null) { return; }
var bExpand = tr.style.display == '';
tr.style.display = (bExpand ? 'none' : '');
}
</script>
<style type="text/css">
table.arg {
font-size: 10px;
}
</style>
</head>
<body>
<h1>Syscall trace for %s (PID: %d)</h1>
<p>
<em>Click on a system call to show/collapse its arguments.</em>
</p>
<table>
<tr>
<th>ID</th>
<th>Name</th>
<th>Direction</th>
<th>Thread</th>
</tr>
""" % (trace.getFilename(), trace.getFilename(), trace.getPID())
        for s in trace.getSyscalls():
            r += s.visit(self, *params, **extraparams)
        r += \
"""
</table>
</body>
</html>
"""
        return r

    def accept_Syscall(self, syscall, *params, **extraparams):
        # One visible table row per syscall plus a hidden row holding the
        # argument table; clicking the first toggles the second via the
        # toggleArg() JavaScript emitted by accept_SyscallTrace().
        style = ""
        if syscall.isPre():
            direction = "PRE"
        else:
            direction = "POST"
        # DOM id shared by the visible row's onclick and the hidden row.
        argid = "%s-%s" % (syscall.getID(), direction)
        r = \
"""
<tr style="%s" onclick="javascript:toggleArg('%s');">
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
</tr>""" % (style, argid, syscall.getID(), syscall.getName(), direction, syscall.getThreadID())
        # build arguments row
        r += \
"""
<tr id="%s" style="display: none;">
<td colspan="4">
<table class="arg" border="1">""" % (argid)
        if len(syscall.getArgument()) > 0:
            for i in syscall.getArgument():
                r += "\n" + syscall.getArgument(i).visit(self, *params, **extraparams)
        else:
            # no arguments
            r += "<tr><td><em>no arguments</em></td></tr>"
        r += \
"""
</table>
</td>
</tr>
"""
        return r

    def accept_SyscallArg(self, arg, *params, **extraparams):
        # One table row per argument: position, name/type, optionality, value.
        r = "<tr>"
        r += "<td>%d</td>" % arg.getNum()
        r += "<td>%s (%s)</td>" % (arg.getVal().getName(), arg.getVal().getType())
        r += "<td>%s</td>" % ((arg.isOptional() and "OPTIONAL") or "MANDATORY")
        # The value renders itself via one of the accept_*Argument methods.
        r += "<td>%s</td>" % arg.getVal().visit(self, *params, **extraparams)
        r += "</tr>"
        return r

    def accept_BasicArgument(self, val, *params, **extraparams):
        # Scalar value: plain string representation.
        return str(val.getValue())

    def accept_StructArgument(self, val, *params, **extraparams):
        # Structured value: bullet list of fields, recursing into nested
        # structures via visit().
        r = "<ul>"
        for n in val:
            f = val.getField(n)
            r += "<li>%s (%s)" % (f.getName(), f.getType())
            if f.isBasic():
                r += " = %s" % f.getValue()
            else:
                r += f.visit(self, *params, **extraparams)
            r += "</li>"
        r += "</ul>"
        return r
| shotgunner/wusstrace | pywuss/visitor/htmlify.py | Python | gpl-3.0 | 3,918 | [
"VisIt"
] | e3690a9136de4e5a0b950e7b4ea05397443cd932777cc4fb25866ada3e53d107 |
"""
Spectroscopy class
This file contains the Spectroscopy class that forms the basis analysis of all
the spectroscopy measurement analyses.
"""
import logging
import numpy as np
import lmfit
import pycqed.analysis_v2.base_analysis as ba
import pycqed.analysis.fitting_models as fit_mods
import pandas as pd
import matplotlib.pyplot as plt
import pycqed.analysis.fit_toolbox.geometry as geo
from collections import OrderedDict
from scipy import integrate
log = logging.getLogger(__name__)
class SpectroscopyOld(ba.BaseDataAnalysis):
    def __init__(self, t_start: str = None,
                 t_stop: str = None,
                 options_dict: dict = None,
                 label: str = None,
                 extract_only: bool = False,
                 auto: bool = True,
                 do_fitting: bool = False):
        """Set up data extraction for a spectroscopy measurement and, when
        ``auto`` is True, immediately run the analysis.

        Forwards the timestamp range, label and option flags to the
        BaseDataAnalysis constructor, then configures which keys are pulled
        out of the data files.
        """
        super().__init__(t_start=t_start, t_stop=t_stop,
                         label=label,
                         options_dict=options_dict,
                         extract_only=extract_only,
                         do_fitting=do_fitting)
        # Whether fit parameters should be extracted from the datafile too.
        self.extract_fitparams = self.options_dict.get('fitparams', False)
        # Map of analysis-internal names -> datafile keys to extract.
        self.params_dict = {'freq_label': 'sweep_name',
                            'freq_unit': 'sweep_unit',
                            'measurementstring': 'measurementstring',
                            'freq': 'sweep_points',
                            'amp': 'amp',
                            'phase': 'phase'}
        # NOTE(review): the return value of this .get() is discarded, so the
        # line has no effect -- probably a leftover; confirm and remove.
        self.options_dict.get('xwidth', None)
        # (Alternative params_dict kept by the original author:
        #  {'xlabel': 'sweep_name', 'xunit': 'sweep_unit', ...,
        #   'measured_values': 'measured_values'})
        if self.extract_fitparams:
            # Key under which fit parameters are stored, overridable via
            # options_dict['fitparams_key'].
            self.params_dict.update({'fitparams': self.options_dict.get('fitparams_key', 'fit_params')})
        # Parameters that must be converted to numeric arrays on extraction.
        self.numeric_params = ['freq', 'amp', 'phase']
        if 'qubit_label' in self.options_dict:
            self.labels.extend(self.options_dict['qubit_label'])
        # Optional extra sweep parameter (e.g. for 2D scans).
        sweep_param = self.options_dict.get('sweep_param', None)
        if sweep_param is not None:
            self.params_dict.update({'sweep_param': sweep_param})
            self.numeric_params.append('sweep_param')
        if auto is True:
            self.run_analysis()
def process_data(self):
proc_data_dict = self.proc_data_dict
proc_data_dict['freq_label'] = 'Frequency (GHz)'
proc_data_dict['amp_label'] = 'Transmission amplitude (arb. units)'
proc_data_dict['phase_label'] = 'Transmission phase (degrees)'
proc_data_dict['freq_range'] = self.options_dict.get(
'freq_range', None)
proc_data_dict['amp_range'] = self.options_dict.get('amp_range', None)
proc_data_dict['phase_range'] = self.options_dict.get(
'phase_range', None)
proc_data_dict['plotsize'] = self.options_dict.get('plotsize', (8, 5))
# FIXME: Nathan : I still don't think using raw_data_dict as a tuple
# in case of multi timestamps is a good idea, unless it is also
# a tuple of length 1 in the case of 1 timestamp. otherwise we
# have to add checks like this one everywhere
if not isinstance(self.raw_data_dict, (tuple, list)):
proc_data_dict['plot_frequency'] = np.squeeze(
self.raw_data_dict['freq'])
proc_data_dict['plot_amp'] = np.squeeze(self.raw_data_dict['amp'])
proc_data_dict['plot_phase'] = np.squeeze(
self.raw_data_dict['phase'])
else:
# TRANSPOSE ALSO NEEDS TO BE CODED FOR 2D
sweep_param = self.options_dict.get('sweep_param', None)
if sweep_param is not None:
proc_data_dict['plot_xvals'] = np.array(
self.raw_data_dict['sweep_param'])
proc_data_dict['plot_xvals'] = np.reshape(proc_data_dict['plot_xvals'],
(len(proc_data_dict['plot_xvals']), 1))
proc_data_dict['plot_xlabel'] = self.options_dict.get(
'xlabel', sweep_param)
else:
xvals = np.array([[tt] for tt in range(
len(self.raw_data_dict))])
proc_data_dict['plot_xvals'] = self.options_dict.get(
'xvals', xvals)
proc_data_dict['plot_xlabel'] = self.options_dict.get(
'xlabel', 'Scan number')
proc_data_dict['plot_xwidth'] = self.options_dict.get(
'xwidth', None)
if proc_data_dict['plot_xwidth'] == 'auto':
x_diff = np.diff(np.ravel(proc_data_dict['plot_xvals']))
dx1 = np.concatenate(([x_diff[0]], x_diff))
dx2 = np.concatenate((x_diff, [x_diff[-1]]))
proc_data_dict['plot_xwidth'] = np.minimum(dx1, dx2)
proc_data_dict['plot_frequency'] = np.array([self.raw_data_dict[i]['hard_sweep_points']
for i in
range(len(self.raw_data_dict))])
proc_data_dict['plot_phase'] = np.array([self.raw_data_dict[i][
'measured_data']['Phase']
for i in
range(len(
self.raw_data_dict))])
proc_data_dict['plot_amp'] = np.array([self.raw_data_dict[i][
'measured_data']['Magn']
for i in
range(len(
self.raw_data_dict))])
else:
# manual setting of plot_xwidths
proc_data_dict['plot_frequency'] = [self.raw_data_dict[i]['hard_sweep_points']
for i in
range(len(self.raw_data_dict))]
proc_data_dict['plot_phase'] = [self.raw_data_dict[i][
'measured_data']['Phase']
for i in
range(len(self.raw_data_dict))]
proc_data_dict['plot_amp'] = [self.raw_data_dict[i][
'measured_data']['Magn']
for i in
range(len(self.raw_data_dict))]
def prepare_plots(self):
proc_data_dict = self.proc_data_dict
plotsize = self.options_dict.get('plotsize')
if len(self.raw_data_dict['timestamps']) == 1:
plot_fn = self.plot_line
self.plot_dicts['amp'] = {'plotfn': plot_fn,
'xvals': proc_data_dict['plot_frequency'],
'yvals': proc_data_dict['plot_amp'],
'title': 'Spectroscopy amplitude: %s' % (self.timestamps[0]),
'xlabel': proc_data_dict['freq_label'],
'ylabel': proc_data_dict['amp_label'],
'yrange': proc_data_dict['amp_range'],
'plotsize': plotsize
}
self.plot_dicts['phase'] = {'plotfn': plot_fn,
'xvals': proc_data_dict['plot_frequency'],
'yvals': proc_data_dict['plot_phase'],
'title': 'Spectroscopy phase: %s' % (self.timestamps[0]),
'xlabel': proc_data_dict['freq_label'],
'ylabel': proc_data_dict['phase_label'],
'yrange': proc_data_dict['phase_range'],
'plotsize': plotsize
}
else:
self.plot_dicts['amp'] = {'plotfn': self.plot_colorx,
'xvals': proc_data_dict['plot_xvals'],
'xwidth': proc_data_dict['plot_xwidth'],
'yvals': proc_data_dict['plot_frequency'],
'zvals': proc_data_dict['plot_amp'],
'title': 'Spectroscopy amplitude: %s' % (self.timestamps[0]),
'xlabel': proc_data_dict['plot_xlabel'],
'ylabel': proc_data_dict['freq_label'],
'zlabel': proc_data_dict['amp_label'],
'yrange': proc_data_dict['freq_range'],
'zrange': proc_data_dict['amp_range'],
'plotsize': plotsize,
'plotcbar': self.options_dict.get('colorbar', False),
}
self.plot_dicts['amp'] = {'plotfn': self.plot_colorx,
'xvals': proc_data_dict['plot_xvals'],
'yvals': proc_data_dict['plot_frequency'],
'zvals': proc_data_dict['plot_amp'],
}
def plot_for_presentation(self, key_list=None, no_label=False):
super().plot_for_presentation(
key_list=key_list, no_label=no_label)
for key in key_list:
pdict = self.plot_dicts[key]
if key == 'amp':
if pdict['plotfn'] == self.plot_line:
ymin, ymax = 0, 1.2 * np.max(np.ravel(pdict['yvals']))
self.axs[key].set_ylim(ymin, ymax)
self.axs[key].set_ylabel('Transmission amplitude (V rms)')
class ResonatorSpectroscopy(SpectroscopyOld):
    """Resonator spectroscopy analysis with hanger-model fitting.

    Supports a single trace or a simultaneous fit of two traces (qubit in
    ground and excited state) when ``options_dict['simultan']`` is True.
    """
    def __init__(self, t_start,
                 options_dict=None,
                 t_stop=None,
                 do_fitting=False,
                 extract_only=False,
                 auto=True):
        """Set up the analysis.

        Args:
            t_start: timestamp, or for ``simultan`` mode a list of exactly
                two timestamps ``[t_on, t_off]`` (t_stop must be None).
            options_dict: analysis options ('fitparams_guess', 'simultan',
                'fit_options', 'subtract_background', ...).
            t_stop: last timestamp; None for a single measurement.
            do_fitting: if True, run the fitting part of the analysis.
            extract_only: if True, only extract data (no plotting).
            auto: if True, run the full analysis on construction.

        Raises:
            ValueError: in simultan mode, if not exactly two timestamps
                are provided.
        """
        super(ResonatorSpectroscopy, self).__init__(t_start, t_stop=t_stop,
                                                    options_dict=options_dict,
                                                    extract_only=extract_only,
                                                    auto=False,
                                                    do_fitting=do_fitting)
        self.do_fitting = do_fitting
        self.fitparams_guess = self.options_dict.get('fitparams_guess', {})
        self.simultan = self.options_dict.get('simultan', False)
        if self.simultan:
            if not (len(t_start) == 2 and t_stop is None):
                raise ValueError('Exactly two timestamps need to be passed for'
                                 ' simultan resonator spectroscopy in ground '
                                 'and excited state as: t_start = [t_on, t_off]')
        if auto is True:
            self.run_analysis()
    def process_data(self):
        """Process data; for a single trace the phase is unwrapped."""
        super(ResonatorSpectroscopy, self).process_data()
        self.proc_data_dict['amp_label'] = 'Transmission amplitude (V rms)'
        self.proc_data_dict['phase_label'] = 'Transmission phase (degrees)'
        if len(self.raw_data_dict) == 1:
            # Unwrap in radians, then convert back to degrees.
            self.proc_data_dict['plot_phase'] = np.unwrap(np.pi / 180. *
                                                          self.proc_data_dict['plot_phase']) * 180 / np.pi
            self.proc_data_dict['plot_xlabel'] = 'Readout Frequency (Hz)'
        else:
            pass
    def prepare_fitting(self):
        """Select the fit model from options and build ``fit_dicts``.

        In simultan mode the two-trace fit is performed here directly
        (``self.sim_fit``) and derived quantities (chi, f_RO_res, f_PF,
        kappa, J_) are stored as attributes.
        """
        super().prepare_fitting()
        # Fitting function for one data trace. The fitted data can be
        # either complex, amp(litude) or phase. The fitting models are
        # HangerFuncAmplitude, HangerFuncComplex,
        # PolyBgHangerFuncAmplitude, SlopedHangerFuncAmplitude,
        # SlopedHangerFuncComplex, hanger_with_pf.
        fit_options = self.options_dict.get('fit_options', None)
        subtract_background = self.options_dict.get(
            'subtract_background', False)
        if fit_options is None:
            fitting_model = 'hanger'
        else:
            fitting_model = fit_options['model']
        if subtract_background:
            self.do_subtract_background(thres=self.options_dict['background_thres'],
                                        back_dict=self.options_dict['background_dict'])
        if fitting_model == 'hanger':
            fit_fn = fit_mods.SlopedHangerFuncAmplitude
            fit_guess_fn = fit_mods.SlopedHangerFuncAmplitudeGuess
            guess_pars = None
        elif fitting_model == 'simple_hanger':
            fit_fn = fit_mods.HangerFuncAmplitude
            raise NotImplementedError(
                'This functions guess function is not coded up yet')
            # TODO HangerFuncAmplitude Guess
        elif fitting_model == 'lorentzian':
            raise NotImplementedError(
                'This functions guess function is not coded up yet')
            fit_fn = fit_mods.Lorentzian
            # TODO LorentzianGuess
        elif fitting_model == 'complex':
            raise NotImplementedError(
                'This functions guess function is not coded up yet')
            fit_fn = fit_mods.HangerFuncComplex
            # TODO HangerFuncComplexGuess
        elif fitting_model == 'hanger_with_pf':
            if self.simultan:
                # Simultaneous fit of ground- and excited-state traces.
                fit_fn = fit_mods.simultan_hanger_with_pf
                self.sim_fit = fit_mods.fit_hanger_with_pf(
                    fit_mods.SimHangerWithPfModel,[
                    np.transpose([self.proc_data_dict['plot_frequency'][0],
                                  self.proc_data_dict['plot_amp'][0]]),
                    np.transpose([self.proc_data_dict['plot_frequency'][1],
                                  self.proc_data_dict['plot_amp'][1]])],
                    simultan=True)
                guess_pars = None
                fit_guess_fn = None
                # NOTE(review): x_fit_0 is assigned here but never used.
                x_fit_0 = self.proc_data_dict['plot_frequency'][0]
                # Dispersive shift: half the g/e readout-resonator splitting.
                self.chi = (self.sim_fit[1].params['omega_ro'].value -
                            self.sim_fit[0].params['omega_ro'].value)/2
                self.f_RO_res = (self.sim_fit[0].params['omega_ro'].value+
                                 self.sim_fit[1].params['omega_ro'].value)/2
                self.f_PF = self.sim_fit[0].params['omega_pf'].value
                self.kappa = self.sim_fit[0].params['kappa_pf'].value
                self.J_ = self.sim_fit[0].params['J'].value
            else:
                fit_fn = fit_mods.hanger_with_pf
                fit_temp = fit_mods.fit_hanger_with_pf(
                    fit_mods.HangerWithPfModel,
                    np.transpose([self.proc_data_dict['plot_frequency'],
                                  self.proc_data_dict['plot_amp']]))
                guess_pars = fit_temp.params
                self.proc_data_dict['fit_params'] = fit_temp.params
                fit_guess_fn = None
        if (len(self.raw_data_dict['timestamps']) == 1) or self.simultan:
            self.fit_dicts['reso_fit'] = {
                'fit_fn': fit_fn,
                'fit_guess_fn': fit_guess_fn,
                'guess_pars': guess_pars,
                'fit_yvals': {
                    'data': self.proc_data_dict['plot_amp']
                },
                'fit_xvals': {
                    'f': self.proc_data_dict['plot_frequency']}
            }
        else:
            # NOTE(review): self.plot_amp / self.plot_frequency are not set
            # anywhere in this class — presumably these should read from
            # self.proc_data_dict; this branch would raise AttributeError.
            self.fit_dicts['reso_fit'] = {
                'fit_fn': fit_fn,
                'fit_guess_fn': fit_guess_fn,
                'guess_pars': guess_pars,
                'fit_yvals': [{'data': np.squeeze(tt)}
                              for tt in self.plot_amp],
                'fit_xvals': np.squeeze([{'f': tt[0]}
                                         for tt in self.plot_frequency])}
    def run_fitting(self):
        """Run base-class fitting; in simultan mode the fit was already done
        in prepare_fitting, so nothing is executed here."""
        if not self.simultan:
            super().run_fitting()
    def do_subtract_background(self, thres=None, back_dict=None, ):
        """Estimate and divide out a slowly-varying background from the
        amplitude traces of multiple timestamps.

        Args:
            thres: amplitude threshold; only points below it are treated
                as background.
            back_dict: lmfit parameter hints for the background model.

        NOTE(review): as written this method would raise NameError:
        ``SlopedHangerFuncAmplitudeGuess`` and ``double_cos_linear_offset``
        are referenced without the ``fit_mods.`` prefix, and
        ``self.plot_amp`` / ``self.plot_frequency`` are never defined.
        Confirm intended names before relying on this path.
        """
        if len(self.raw_data_dict['timestamps']) == 1:
            pass
        else:
            x_filtered = []
            y_filtered = []
            for tt in range(len(self.raw_data_dict['timestamps'])):
                y = np.squeeze(self.plot_amp[tt])
                x = np.squeeze(self.plot_frequency)[tt]
                # Exclude a 2*df window around the resonance from the
                # background estimate.
                guess_dict = SlopedHangerFuncAmplitudeGuess(y, x)
                Q = guess_dict['Q']['value']
                f0 = guess_dict['f0']['value']
                df = 2 * f0 / Q
                fmin = f0 - df
                fmax = f0 + df
                indices = np.logical_or(x < fmin * 1e9, x > fmax * 1e9)
                x_filtered.append(x[indices])
                y_filtered.append(y[indices])
            self.background = pd.concat([pd.Series(y_filtered[tt], index=x_filtered[tt])
                                         for tt in range(len(self.raw_data_dict['timestamps']))], axis=1).mean(axis=1)
            background_vals = self.background.reset_index().values
            freq = background_vals[:, 0]
            amp = background_vals[:, 1]
            # thres = 0.0065
            indices = amp < thres
            freq = freq[indices] * 1e-9
            amp = amp[indices]
            fit_fn = double_cos_linear_offset
            model = lmfit.Model(fit_fn)
            fit_yvals = amp
            fit_xvals = {'t': freq}
            for key, val in list(back_dict.items()):
                model.set_param_hint(key, **val)
            params = model.make_params()
            fit_res = model.fit(fit_yvals,
                                params=params,
                                **fit_xvals)
            self.background_fit = fit_res
            for tt in range(len(self.raw_data_dict['timestamps'])):
                divide_vals = fit_fn(np.squeeze(self.plot_frequency)[tt] * 1e-9, **fit_res.best_values)
                self.plot_amp[tt] = np.array(
                    [np.array([np.divide(np.squeeze(self.plot_amp[tt]), divide_vals)])]).transpose()
    def prepare_plots(self):
        """Prepare plots; in simultan mode both state traces, their IQ
        distance and the chosen readout frequency are shown."""
        if not self.simultan:
            super(ResonatorSpectroscopy, self).prepare_plots()
        else:
            proc_data_dict = self.proc_data_dict
            plotsize = self.options_dict.get('plotsize')
            plot_fn = self.plot_line
            # Distance in the IQ plane between the g and e responses.
            amp_diff = np.abs(proc_data_dict['plot_amp'][0]*np.exp(
                1j*np.pi*proc_data_dict['plot_phase'][0]/180)-
                              proc_data_dict['plot_amp'][1]*np.exp(
                1j*np.pi*proc_data_dict['plot_phase'][1]/180))
            # FIXME: Nathan 2019.05.08 I don't think this is the right place
            # to adapt the ro frequency (i.e. in prepare_plot)... I had a
            # hard time finding where it happened!
            self.f_RO = proc_data_dict['plot_frequency'][0][np.argmax(amp_diff)]
            self.plot_dicts['amp1'] = {'plotfn': plot_fn,
                                       'ax_id': 'amp',
                                       'xvals': proc_data_dict['plot_frequency'][0],
                                       'yvals': proc_data_dict['plot_amp'][0],
                                       'title': 'Spectroscopy amplitude: \n'
                                                '%s-%s' % (
                                                    self.raw_data_dict[0][
                                                        'measurementstring'],
                                                    self.timestamps[0]),
                                       'xlabel': proc_data_dict['freq_label'],
                                       'xunit': 'Hz',
                                       'ylabel': proc_data_dict['amp_label'],
                                       'yrange': proc_data_dict['amp_range'],
                                       'plotsize': plotsize,
                                       'color': 'b',
                                       'linestyle': '',
                                       'marker': 'o',
                                       'setlabel': '|g> data',
                                       'do_legend': True
                                       }
            self.plot_dicts['amp2'] = {'plotfn': plot_fn,
                                       'ax_id': 'amp',
                                       'xvals': proc_data_dict['plot_frequency'][1],
                                       'yvals': proc_data_dict['plot_amp'][1],
                                       'color': 'r',
                                       'linestyle': '',
                                       'marker': 'o',
                                       'setlabel': '|e> data',
                                       'do_legend': True
                                       }
            self.plot_dicts['diff'] = {'plotfn': plot_fn,
                                       'ax_id': 'amp',
                                       'xvals': proc_data_dict['plot_frequency'][0],
                                       'yvals': amp_diff,
                                       'color': 'g',
                                       'linestyle': '',
                                       'marker': 'o',
                                       'setlabel': 'diff',
                                       'do_legend': True
                                       }
            self.plot_dicts['phase'] = {'plotfn': plot_fn,
                                        'xvals': proc_data_dict['plot_frequency'],
                                        'yvals': proc_data_dict['plot_phase'],
                                        'title': 'Spectroscopy phase: '
                                                 '%s' % (self.timestamps[0]),
                                        'xlabel': proc_data_dict['freq_label'],
                                        'ylabel': proc_data_dict['phase_label'],
                                        'yrange': proc_data_dict['phase_range'],
                                        'plotsize': plotsize
                                        }
    def plot_fitting(self):
        """Overlay fit curves and fit-parameter text on the amplitude axis."""
        if self.do_fitting:
            fit_options = self.options_dict.get('fit_options', None)
            if fit_options is None:
                fitting_model = 'hanger'
            else:
                fitting_model = fit_options['model']
            for key, fit_dict in self.fit_dicts.items():
                if not self.simultan:
                    fit_results = fit_dict['fit_res']
                else:
                    fit_results = self.sim_fit
                ax = self.axs['amp']
                if len(self.raw_data_dict['timestamps']) == 1 or self.simultan:
                    if fitting_model == 'hanger':
                        ax.plot(list(fit_dict['fit_xvals'].values())[0],
                                fit_results.best_fit, 'r-', linewidth=1.5)
                        textstr = 'f0 = %.5f $\pm$ %.1g GHz' % (
                            fit_results.params['f0'].value,
                            fit_results.params['f0'].stderr) + '\n' \
                            'Q = %.4g $\pm$ %.0g' % (
                            fit_results.params['Q'].value,
                            fit_results.params['Q'].stderr) + '\n' \
                            'Qc = %.4g $\pm$ %.0g' % (
                            fit_results.params['Qc'].value,
                            fit_results.params['Qc'].stderr) + '\n' \
                            'Qi = %.4g $\pm$ %.0g' % (
                            fit_results.params['Qi'].value,
                            fit_results.params['Qi'].stderr)
                        box_props = dict(boxstyle='Square',
                                         facecolor='white', alpha=0.8)
                        # NOTE(review): 'key' here shadows the loop variable
                        # of the enclosing for-loop; harmless (last loop
                        # iteration value is overwritten) but confusing.
                        self.box_props = {key: val for key,
                                          val in box_props.items()}
                        self.box_props.update({'linewidth': 0})
                        self.box_props['alpha'] = 0.
                        ax.text(0.03, 0.95, textstr, transform=ax.transAxes,
                                verticalalignment='top', bbox=self.box_props)
                    elif fitting_model == 'simple_hanger':
                        raise NotImplementedError(
                            'This functions guess function is not coded up yet')
                    elif fitting_model == 'lorentzian':
                        raise NotImplementedError(
                            'This functions guess function is not coded up yet')
                    elif fitting_model == 'complex':
                        raise NotImplementedError(
                            'This functions guess function is not coded up yet')
                    elif fitting_model == 'hanger_with_pf':
                        if not self.simultan:
                            ax.plot(list(fit_dict['fit_xvals'].values())[0],
                                    fit_results.best_fit, 'r-', linewidth=1.5)
                            par = ["%.3f" %(fit_results.params['omega_ro'].value*1e-9),
                                   "%.3f" %(fit_results.params['omega_pf'].value*1e-9),
                                   "%.3f" %(fit_results.params['kappa_pf'].value*1e-6),
                                   "%.3f" %(fit_results.params['J'].value*1e-6),
                                   "%.3f" %(fit_results.params['gamma_ro'].value*1e-6)]
                            textstr = str('f_ro = '+par[0]+' GHz'
                                          +'\n\nf_pf = '+par[1]+' GHz'
                                          +'\n\nkappa = '+par[2]+' MHz'
                                          +'\n\nJ = '+par[3]+' MHz'
                                          +'\n\ngamma_ro = '+par[4]+' MHz')
                            # Invisible point used only to get the text into
                            # the legend.
                            ax.plot([0],
                                    [0],
                                    'w',
                                    label=textstr)
                        else:
                            x_fit_0 = np.linspace(min(
                                self.proc_data_dict['plot_frequency'][0][0],
                                self.proc_data_dict['plot_frequency'][1][0]),
                                max(self.proc_data_dict['plot_frequency'][0][-1],
                                    self.proc_data_dict['plot_frequency'][1][-1]),
                                len(self.proc_data_dict['plot_frequency'][0]))
                            x_fit_1 = np.linspace(min(
                                self.proc_data_dict['plot_frequency'][0][0],
                                self.proc_data_dict['plot_frequency'][1][0]),
                                max(self.proc_data_dict['plot_frequency'][0][-1],
                                    self.proc_data_dict['plot_frequency'][1][-1]),
                                len(self.proc_data_dict['plot_frequency'][1]))
                            ax.plot(x_fit_0,
                                    fit_results[0].eval(
                                        fit_results[0].params,
                                        f=x_fit_0),
                                    'b--', linewidth=1.5, label='|g> fit')
                            ax.plot(x_fit_1,
                                    fit_results[1].eval(
                                        fit_results[1].params,
                                        f=x_fit_1),
                                    'r--', linewidth=1.5, label='|e> fit')
                            f_RO = self.f_RO
                            # Vertical marker at the chosen readout frequency.
                            ax.plot([f_RO, f_RO],
                                    [0,max(max(self.raw_data_dict['amp'][0]),
                                           max(self.raw_data_dict['amp'][1]))],
                                    'k--', linewidth=1.5)
                            par = ["%.3f" %(fit_results[0].params['gamma_ro'].value*1e-6),
                                   "%.3f" %(fit_results[0].params['omega_pf'].value*1e-9),
                                   "%.3f" %(fit_results[0].params['kappa_pf'].value*1e-6),
                                   "%.3f" %(fit_results[0].params['J'].value*1e-6),
                                   "%.3f" %(fit_results[0].params['omega_ro'].value*1e-9),
                                   "%.3f" %(fit_results[1].params['omega_ro'].value*1e-9),
                                   "%.3f" %((fit_results[1].params['omega_ro'].value-
                                             fit_results[0].params['omega_ro'].value)
                                            /2*1e-6)]
                            textstr = str('\n\nkappa = '+par[2]+' MHz'
                                          +'\n\nJ = '+par[3]+' MHz'
                                          +'\n\nchi = '+par[6]+' MHz'
                                          +'\n\nf_pf = '+par[1]+' GHz'
                                          +'\n\nf_rr |g> = '+par[4]+' GHz'
                                          +'\n\nf_rr |e> = '+par[5]+' GHz'
                                          +'\n\nf_RO = '+"%.3f" %(f_RO*1e-9)+''
                                                                             ' GHz'
                                          )
                            ax.plot([f_RO],
                                    [0],
                                    'w--', label=textstr)
                        # box_props = dict(boxstyle='Square',
                        #                  facecolor='white', alpha=0.8)
                        # self.box_props = {key: val for key,
                        #                   val in box_props.items()}
                        # self.box_props.update({'linewidth': 0})
                        # self.box_props['alpha'] = 0.
                        #
                        ax.legend(loc='upper left', bbox_to_anchor=[1, 1])
                else:
                    # Multiple timestamps, no simultan fit: mark the fitted
                    # resonance frequency per scan.
                    # NOTE(review): self.plot_xvals is not defined on this
                    # class (proc_data_dict['plot_xvals'] is) — confirm.
                    reso_freqs = [fit_results[tt].params['f0'].value *
                                  1e9 for tt in range(len(self.raw_data_dict['timestamps']))]
                    ax.plot(np.squeeze(self.plot_xvals),
                            reso_freqs,
                            'o',
                            color='m',
                            markersize=3)
    def plot(self, key_list=None, axs_dict=None, presentation_mode=None, no_label=False):
        """Plot via the base class, then overlay fits if fitting was done.

        NOTE(review): ``no_label`` is accepted but not forwarded to the
        base-class plot call — confirm whether this is intentional.
        """
        super(ResonatorSpectroscopy, self).plot(key_list=key_list,
                                                axs_dict=axs_dict,
                                                presentation_mode=presentation_mode)
        if self.do_fitting:
            self.plot_fitting()
class ResonatorSpectroscopy_v2(SpectroscopyOld):
    def __init__(self, t_start=None,
                 options_dict=None,
                 t_stop=None,
                 do_fitting=False,
                 extract_only=False,
                 auto=True, **kw):
        """Set up multi-state resonator spectroscopy analysis.

        FIXME: Nathan: the dependency on the # of timestamps is carried
        through the entire class and is horrible. We should loop and make
        fits separately, instead of using the simultan parameter.
        It would be much simpler!

        Args:
            t_start: first (or only) timestamp of the measurement(s).
            t_stop: last timestamp; None for a single measurement.
            options_dict: analysis options; notable keys:
                ref_state: reference state timestamp when comparing several
                    spectra. Most of the time it will be the timestamp of
                    the ground state.
                # TODO: Nathan: merge with fit_options (?)
                qutrit_fit_options: dict with options for qutrit RO
                    frequency fitting:
                        sigma_init: initial noise standard deviation assumed
                            for the distribution of points in the IQ plane.
                            Assumed to be large; the algorithm reduces it.
                        target_fidelity: target fidelity.
                        max_width_at_max_fid: maximum width (in Hz) when
                            searching for an appropriate sigma.
            do_fitting: if True, run the fitting part of the analysis.
            extract_only: if True, only extract data (no plotting).
            auto: if True, run the full analysis on construction.
        """
        super(ResonatorSpectroscopy_v2, self).__init__(t_start=t_start,
                                                       t_stop=t_stop,
                                                       options_dict=options_dict,
                                                       extract_only=extract_only,
                                                       auto=False,
                                                       do_fitting=do_fitting,
                                                       **kw)
        self.do_fitting = do_fitting
        self.fitparams_guess = self.options_dict.get('fitparams_guess', {})
        if auto is True:
            self.run_analysis()
    def process_data(self):
        """Build complex spectra per state and pairwise IQ distances.

        Populates proc_data_dict with:
            'spectra_mapping': state label -> timestamp mapping,
            'spectra': state label -> complex response (Magn * exp(i*phase)),
            'iq_distance': 'xy' -> |spectrum_x - spectrum_y| for state pairs,
            'fit_raw_results': empty OrderedDict filled later by fitting.
        """
        super(ResonatorSpectroscopy_v2, self).process_data()
        self.proc_data_dict['amp_label'] = 'Transmission amplitude (V rms)'
        self.proc_data_dict['phase_label'] = 'Transmission phase (degrees)'
        # now assumes the raw data dict is a tuple due to aa1ed4cdf546
        n_spectra = len(self.raw_data_dict)
        self.proc_data_dict['plot_xlabel'] = 'Readout Frequency (Hz)'
        if self.options_dict.get('ref_state', None) is None:
            default_ref_state = 'g'
            message = "Analyzing spectra of {} states but no ref_state " \
                      "was passed. Assuming timestamp[0]: {} is the " \
                      "timestamp of reference state with label {}"
            log.warning(
                message.format(n_spectra, self.raw_data_dict[0]['timestamp'],
                               default_ref_state))
            self.ref_state = default_ref_state
        else:
            self.ref_state = self.options_dict['ref_state']
        spectra_mapping = \
            self.options_dict.get("spectra_mapping",
                                  self._default_spectra_mapping())
        # Complex response per state: amplitude rotated by phase (degrees).
        spectra = {state: self.raw_data_dict[i]["measured_data"]['Magn'] *
                          np.exp(1j * np.pi *
                                 self.raw_data_dict[i]["measured_data"]['Phase'] / 180.)
                   for i, state in enumerate(spectra_mapping.keys())}
        iq_distance = {state + self.ref_state:
                           np.abs(spectra[state] - spectra[self.ref_state])
                       for state in spectra_mapping.keys()
                       if state != self.ref_state}
        for state_i in spectra_mapping:
            for state_j in spectra_mapping:
                if not state_i + state_j in iq_distance and \
                        state_i != state_j:
                    # both ij and ji will have entries which will have
                    # the same values but this is not a problem per se.
                    iq_distance[state_i + state_j] = \
                        np.abs(spectra[state_i] - spectra[state_j])
        self.proc_data_dict["spectra_mapping"] = spectra_mapping
        self.proc_data_dict["spectra"] = spectra
        self.proc_data_dict["iq_distance"] = iq_distance
        self.proc_data_dict["fit_raw_results"] = OrderedDict()
def _default_spectra_mapping(self):
default_levels_order = ('g', 'e', 'f')
# assumes raw_data_dict is tuple
tts = [d['timestamp'] for d in self.raw_data_dict]
spectra_mapping = {default_levels_order[i]: tt
for i, tt in enumerate(tts)}
msg = "Assuming following mapping templates of spectra: {}." \
"\nspectra_mapping can be used in options_dict to modify" \
"this behavior."
log.warning(msg.format(spectra_mapping))
return spectra_mapping
    def prepare_fitting(self):
        """Select fit model and prepare fit containers.

        NOTE(review): most of the comparative (multi-state) fitting below is
        commented out (work in progress, see FIXME); for multiple spectra
        with 'hanger_with_pf' the ``else: pass`` branch currently does
        nothing.
        """
        super().prepare_fitting()
        # Fitting function for one data trace. The fitted data can be
        # either complex, amp(litude) or phase. The fitting models are
        # HangerFuncAmplitude, HangerFuncComplex,
        # PolyBgHangerFuncAmplitude, SlopedHangerFuncAmplitude,
        # SlopedHangerFuncComplex, hanger_with_pf.
        fit_options = self.options_dict.get('fit_options', dict())
        subtract_background = \
            self.options_dict.get('subtract_background', False)
        fitting_model = fit_options.get('model', 'hanger')
        self.proc_data_dict['fit_results'] = OrderedDict()
        self.fit_res = dict()
        if subtract_background:
            log.warning("Substract background might not work and has "
                        "not been tested.")
            self.do_subtract_background(
                thres=self.options_dict['background_thres'],
                back_dict=self.options_dict['background_dict'])
        if fitting_model == 'hanger':
            fit_fn = fit_mods.SlopedHangerFuncAmplitude
            fit_guess_fn = fit_mods.SlopedHangerFuncAmplitudeGuess
            guess_pars = None
        elif fitting_model == 'simple_hanger':
            fit_fn = fit_mods.HangerFuncAmplitude
            raise NotImplementedError(
                'This functions guess function is not coded up yet')
            # TODO HangerFuncAmplitude Guess
        elif fitting_model == 'lorentzian':
            raise NotImplementedError(
                'This functions guess function is not coded up yet')
            fit_fn = fit_mods.Lorentzian
            # TODO LorentzianGuess
        elif fitting_model == 'complex':
            raise NotImplementedError(
                'This functions guess function is not coded up yet')
            fit_fn = fit_mods.HangerFuncComplex
            # TODO HangerFuncComplexGuess
        elif fitting_model == 'hanger_with_pf':
            if not isinstance(self.raw_data_dict, tuple):
                # single fit
                fit_fn = fit_mods.hanger_with_pf
                fit_temp = fit_mods.fit_hanger_with_pf(
                    fit_mods.HangerWithPfModel,
                    np.transpose([self.proc_data_dict['plot_frequency'],
                                  self.proc_data_dict['plot_amp']]))
                guess_pars = fit_temp.params
                self.proc_data_dict['fit_params'] = fit_temp.params
                self.proc_data_dict['fit_raw_results'][self.ref_state] = \
                    fit_temp.params
                fit_guess_fn = None
            else:
                pass
            # comparative fit to reference state
            # FIXME: Nathan: I guess here only fit dicts should be created
            #  and then passed to run_fitting() of basis class but this is
            # # not done here. Instead, fitting seems to be done here.
            # ref_spectrum = self.proc_data_dict['spectra'][self.ref_state]
            # for state, spectrum in self.proc_data_dict['spectra'].items():
            #     if state == self.ref_state:
            #         continue
            #     key = self.ref_state + state
            #     fit_fn = fit_mods.simultan_hanger_with_pf
            #     fit_results = fit_mods.fit_hanger_with_pf(
            #         fit_mods.SimHangerWithPfModel, [
            #             np.transpose(
            #                 [self.proc_data_dict['plot_frequency'][0],
            #                  np.abs(ref_spectrum)]),
            #             np.transpose(
            #                 [self.proc_data_dict['plot_frequency'][0],
            #                  np.abs(spectrum)])],
            #         simultan=True)
            #     self.proc_data_dict['fit_raw_results'][key] = fit_results
            #     guess_pars = None
            #     fit_guess_fn = None
            #
            #     chi = (fit_results[1].params['omega_ro'].value -
            #            fit_results[0].params['omega_ro'].value) / 2
            #     f_RO_res = (fit_results[0].params['omega_ro'].value +
            #                 fit_results[1].params['omega_ro'].value) / 2
            #     f_PF = fit_results[0].params['omega_pf'].value
            #     kappa = fit_results[0].params['kappa_pf'].value
            #     J_ = fit_results[0].params['J'].value
            #     f_RO = self.find_f_RO([self.ref_state, state])
            #     self.fit_res[key] = \
            #         dict(chi=chi, f_RO_res=f_RO_res, f_PF=f_PF,
            #              kappa=kappa, J_=J_, f_RO=f_RO)
            #
        # if not isinstance(self.raw_data_dict, tuple ):
        #     self.fit_dicts['reso_fit'] = {
        #         'fit_fn': fit_fn,
        #         'fit_guess_fn': fit_guess_fn,
        #         'guess_pars': guess_pars,
        #         'fit_yvals': {'data': self.proc_data_dict['plot_amp']},
        #         'fit_xvals': { 'f': self.proc_data_dict['plot_frequency']}}
def find_f_RO(self, states):
"""
Finds the best readout frequency of the list of states.
If one state is passed, the resonator frequency is returned.
If two states are passed, the frequency with maximal difference
between the two states in IQ plane is returned (optimal qubit nRO freq).
If three states are passed, optimal frequency is found by finding
the highest variance allowing a target fidelity to be reached on a
narrow frequency interval. (optimal qutrit RO_freq)
Args:
states: list of states between which readout frequency
should be found
Returns:
"""
key = "".join(states)
if len(states) == 1:
f_RO = self.proc_data_dict['plot_frequency'][0][
np.argmax(self.proc_data_dict['spectra'][key])]
elif len(states) == 2:
f_RO = self.proc_data_dict['plot_frequency'][0][
np.argmax(self.proc_data_dict['iq_distance'][key])]
elif len(states) == 3:
f_RO, raw_results = self._find_f_RO_qutrit(
self.proc_data_dict['spectra'],
self.proc_data_dict['plot_frequency'][0],
**self.options_dict.get('qutrit_fit_options', dict()))
self.proc_data_dict["fit_raw_results"][key] = raw_results
else:
raise ValueError("{} states were given but method expects 1, "
"2 or 3 states.")
return f_RO
@staticmethod
def _find_f_RO_qutrit(spectra, freqs, sigma_init=0.01,
return_full=True, **kw):
n_iter = 0
avg_fidelities = OrderedDict()
single_level_fidelities = OrderedDict()
optimal_frequency = []
sigmas = [sigma_init]
log.debug("###### Starting Analysis to find qutrit f_RO ######")
while ResonatorSpectroscopy_v2.update_sigma(avg_fidelities, sigmas, freqs,
optimal_frequency, n_iter, **kw):
log.debug("Iteration {}".format(n_iter))
sigma = sigmas[-1]
if sigma in avg_fidelities.keys():
continue
else:
avg_fidelity, single_level_fidelity = \
ResonatorSpectroscopy_v2.three_gaussians_overlap(spectra, sigma)
avg_fidelities[sigma] = avg_fidelity
single_level_fidelities[sigma] = single_level_fidelity
n_iter += 1
raw_results = dict(avg_fidelities=avg_fidelities,
single_level_fidelities=single_level_fidelities,
sigmas=sigmas, optimal_frequency=optimal_frequency)
qutrit_key = "".join(list(spectra))
log.debug("###### Finished Analysis. Optimal f_RO: {} ######"
.format(optimal_frequency[-1]))
return optimal_frequency[-1], raw_results if return_full else \
optimal_frequency[-1]
    @staticmethod
    def update_sigma(avg_fidelities, sigmas, freqs,
                     optimal_frequency, n_iter, n_iter_max=20,
                     target_fidelity=0.99, max_width_at_max_fid=0.2e6, **kw):
        """Decide whether the sigma search continues and propose a new sigma.

        Mutates ``sigmas`` (appends the next sigma to evaluate, searched on
        a log scale) and ``optimal_frequency`` (appends the current best
        frequency). Termination: success when the target fidelity is reached
        on a window no wider than ``max_width_at_max_fid``, or give-up after
        ``n_iter_max`` iterations.

        Args:
            avg_fidelities: sigma -> per-frequency average fidelity arrays
                evaluated so far.
            sigmas: list of sigmas tried so far (last entry = current).
            freqs: frequency axis (assumed uniformly spaced).
            optimal_frequency: history of best frequencies (mutated).
            n_iter: current iteration count.
            n_iter_max: iteration limit.
            target_fidelity: fidelity to reach.
            max_width_at_max_fid: maximum acceptable width (Hz) of the
                above-target fidelity plateau.

        Returns:
            bool: True if the search should continue.
        """
        continue_search = True
        if n_iter >= n_iter_max:
            log.warning("Could not converge to a proper RO frequency" \
                "within {} iterations. Returning best frequency found so far. "
                "Consider changing log_bounds".format(n_iter_max))
            continue_search = False
        elif len(avg_fidelities.keys()) == 0:
            # search has not started yet
            continue_search = True
        else:
            delta_freq = freqs[1] - freqs[0]
            if max_width_at_max_fid < delta_freq:
                msg = "max_width_at_max_fid cannot be smaller than the " \
                      "difference between two frequency data points.\n" \
                      "max_width_at_max_fid: {}\nDelta freq: {}"
                log.warning(msg.format(max_width_at_max_fid, delta_freq))
                max_width_at_max_fid = delta_freq
            sigma_current = sigmas[-1]
            fid, idx_width = ResonatorSpectroscopy_v2.fidelity_and_width(
                avg_fidelities[sigma_current], target_fidelity)
            # Plateau width in Hz (idx_width counts frequency samples).
            width = idx_width * delta_freq
            log.debug("sigmas " + str(sigmas) + " width (MHz): "
                      + str(width / 1e6))
            f_opt = freqs[np.argmax(avg_fidelities[sigma_current])]
            optimal_frequency.append(f_opt)
            if len(sigmas) == 1:
                # No previous sigma: pretend the previous one was a decade
                # larger so the first log-step is well defined.
                sigma_previous = 10 ** (np.log10(sigma_current) + 1)
            else:
                sigma_previous = sigmas[-2]
            log_diff = np.log10(sigma_previous) - np.log10(sigma_current)
            if fid >= target_fidelity and width <= max_width_at_max_fid:
                # succeeded
                continue_search = False
            elif fid >= target_fidelity and width > max_width_at_max_fid:
                # sigma is too small, update lower bound
                if log_diff < 0:
                    sigma_new = \
                        10 ** (np.log10(sigma_current) - np.abs(log_diff) / 2)
                else:
                    sigma_new = \
                        10 ** (np.log10(sigma_current) + np.abs(log_diff))
                msg = "Width > max_width, update sigma to: {}"
                log.debug(msg.format(sigma_new))
                sigmas.append(sigma_new)
            elif fid < target_fidelity:
                # sigma is too high, update higher bound
                if np.all(np.diff(sigmas) < 0):
                    sigma_new = 10 ** (np.log10(sigma_current) - log_diff)
                else:
                    sigma_new = 10 ** (np.log10(sigma_current) -
                                       np.abs(log_diff) / 2)
                msg = "Fidelity < target fidelity, update sigma to: {}"
                log.debug(msg.format(sigma_new))
                sigmas.append(sigma_new)
        return continue_search
@staticmethod
def fidelity_and_width(avg_fidelity, target_fidelity):
avg_fidelity = np.array(avg_fidelity)
max_fid = np.max(avg_fidelity)
idx_width = np.sum(
(avg_fidelity >= target_fidelity) * (avg_fidelity <= 1.))
return max_fid, idx_width
@staticmethod
def _restricted_angle(angle):
entire_div = angle // (np.sign(angle) * np.pi)
return angle - np.sign(angle) * entire_div * 2 * np.pi
@staticmethod
def three_gaussians_overlap(spectrums, sigma):
"""
Evaluates the overlap of 3 gaussian distributions for each complex
point given in spectrums.
Args:
spectrums: dict with resonnator response of each state
sigma: standard deviation of gaussians used for computing overlap
Returns:
"""
def g(x, d, sigma=0.1):
x = ResonatorSpectroscopy_v2._restricted_angle(x)
return np.exp(-d ** 2 / np.cos(x) ** 2 / (2 * sigma ** 2))
def f(gamma, val1=0, val2=1 / (2 * np.pi)):
gamma = ResonatorSpectroscopy_v2._restricted_angle(gamma)
return val1 if gamma > -np.pi / 2 and gamma < np.pi / 2 else val2
def integral(angle, dist, sigma):
const = 1 / (2 * np.pi)
p1 = const * \
integrate.quad(lambda x: f(x, g(x, dist, sigma=sigma), 0),
angle - np.pi,
angle)[0]
return -p1 + integrate.quad(lambda x: f(x),
angle - np.pi, angle)[0] + \
integrate.quad(lambda x: f(x, 1 / (2 * np.pi), 0),
angle - np.pi,
angle)[0]
assert len(spectrums) == 3, "3 spectrums required for qutrit F_RO " \
"analysis. Found {}".format((len(spectrums)))
i1s, i2s, i3s = [], [], []
# in most cases, states will be ['g', 'e', 'f'] but to ensure not to
# be dependent on labels we take indices of keys
states = list(spectrums.keys())
for i in range(len(spectrums[states[0]])):
pt1 = (spectrums[states[0]][i].real, spectrums[states[0]][i].imag)
pt2 = (spectrums[states[1]][i].real, spectrums[states[1]][i].imag)
pt3 = (spectrums[states[2]][i].real, spectrums[states[2]][i].imag)
d1 = geo.distance(pt1, pt2) / 2
d2 = geo.distance(pt2, pt3) / 2
d3 = geo.distance(pt1, pt3) / 2
# translate to point1
pt2 = tuple(np.asarray(pt2) - np.asarray(pt1))
pt3 = tuple(np.asarray(pt3) - np.asarray(pt1))
pt1 = (0., 0.)
c, R = geo.circumcenter(pt2, pt3, pt1, show=False)
gamma1 = np.arccos(d1 / R)
gamma2 = np.arccos(d2 / R)
gamma3 = np.arccos(d3 / R)
i1 = integral(gamma1, d1, sigma)
i2 = integral(gamma2, d2, sigma)
i3 = integral(gamma3, d3, sigma)
i1s.append(i1)
i2s.append(i2)
i3s.append(i3)
i1s, i2s, i3s = np.array(i1s), np.array(i2s), np.array(i3s)
total_area = 2 * i1s + 2 * i2s + 2 * i3s
avg_fidelity = total_area / 3
fid_state_0 = i1s + i3s
not0 = 1 - fid_state_0
fid_state_1 = i1s + i2s
not1 = 1 - i1s + i2s
fid_state_2 = i2s + i3s
not2 = 1 - fid_state_2
single_level_fid = {states[0]: fid_state_0,
states[1]: fid_state_1,
states[2]: fid_state_2}
return avg_fidelity, single_level_fid
def run_fitting(self):
# FIXME: Nathan: for now this is left as written previously but
# ultimately all fitting should be done in base class if possible
states = list(self.proc_data_dict['spectra'])
if len(states) == 1:
super().run_fitting()
if len(states) == 3:
f_RO_qutrit = self.find_f_RO(states)
self.fit_res["".join(states)] = dict(f_RO=f_RO_qutrit)
def prepare_plots(self):
self.get_default_plot_params(set_pars=True)
proc_data_dict = self.proc_data_dict
spectra = proc_data_dict['spectra']
plotsize = self.options_dict.get('plotsize')
plot_fn = self.plot_line
for i, (state, spectrum) in enumerate(spectra.items()):
all_freqs = proc_data_dict['plot_frequency']
freqs = all_freqs if np.ndim(all_freqs) == 1 else all_freqs[0]
self.plot_dicts['amp_{}'
.format(state)] = {
'plotfn': plot_fn,
'ax_id': 'amp',
'xvals': freqs,
'yvals': np.abs(spectrum),
'title': 'Spectroscopy amplitude: \n'
'%s-%s' % (
self.raw_data_dict[i]['measurementstring'],
self.raw_data_dict[i]['timestamp']),
'xlabel': proc_data_dict['freq_label'],
'xunit': 'Hz',
'ylabel': proc_data_dict['amp_label'],
'yrange': proc_data_dict['amp_range'],
'plotsize': plotsize,
# 'color': 'b',
'linestyle': '',
'marker': 'o',
'setlabel': '$|{}\\rangle$'.format(state),
'do_legend': True }
if state != self.ref_state and len(spectra) == 2.:
# if comparing two stattes we are interested in the
# difference between the two responses
label = "iq_distance_{}{}".format(state, self.ref_state)
self.plot_dicts[label] = {
'plotfn': plot_fn,
'ax_id': 'amp',
'xvals': proc_data_dict['plot_frequency'][0],
'yvals': proc_data_dict['iq_distance'][
state + self.ref_state],
#'color': 'g',
'linestyle': '',
'marker': 'o',
'markersize': 5,
'setlabel': label,
'do_legend': True}
fig = self.plot_difference_iq_plane()
self.figs['difference_iq_plane'] = fig
fig2 = self.plot_gaussian_overlap()
self.figs['gaussian_overlap'] = fig2
fig3 = self.plot_max_area()
self.figs['area_in_iq_plane'] = fig3
def plot_fitting(self):
fit_options = self.options_dict.get('fit_options', None)
if fit_options is None:
fitting_model = 'hanger'
else:
fitting_model = fit_options['model']
if not isinstance(self.raw_data_dict, tuple):
fit_results = self.fit_dict['fit_res']
else:
fit_results = self.proc_data_dict['fit_raw_results']
ax = self.axs['amp']
if fitting_model == 'hanger':
raise NotImplementedError(
'Plotting hanger is not supported in this class.')
elif fitting_model == 'simple_hanger':
raise NotImplementedError(
'This functions guess function is not coded up yet')
elif fitting_model == 'lorentzian':
raise NotImplementedError(
'This functions guess function is not coded up yet')
elif fitting_model == 'complex':
raise NotImplementedError(
'This functions guess function is not coded up yet')
elif fitting_model == 'hanger_with_pf':
label = "$|{}\\rangle$ {}"
all_freqs = self.proc_data_dict['plot_frequency']
freqs = all_freqs if np.ndim(all_freqs) == 1 else all_freqs[0]
for state, spectrum in self.proc_data_dict['spectra'].items():
if len(self.proc_data_dict['spectra']) == 1:
# then also add single fit parameters to the legend
# else the coupled params will be added from fit results
textstr = "f_ro = {:.3f} GHz\nf_pf = {:3f} GHz\n" \
"kappa = {:3f} MHz\nJ = {:3f} MHz\ngamma_ro = " \
"{:3f} MHz".format(
fit_results.params['omega_ro'].value * 1e-9,
fit_results.params['omega_pf'].value * 1e-9,
fit_results.params['kappa_pf'].value * 1e-6,
fit_results.params['J'].value * 1e-6,
fit_results.params['gamma_ro'].value * 1e-6)
# note: next line will have to be removed when
# cleaning up the # timestamps dependency
ax.plot(freqs,
fit_results.best_fit, 'r-', linewidth=1.5)
ax.plot([], [], 'w', label=textstr)
if len(self.proc_data_dict['spectra']) != 1 :
for states, params in self.fit_res.items():
f_r = fit_results[states]
if len(states) == 3:
ax.plot([params["f_RO"], params["f_RO"]],
[0, np.max(np.abs(np.asarray(
list(self.proc_data_dict['spectra'].values()))))],
'm--', linewidth=1.5, label="F_RO_{}"
.format(states))
ax2 = ax.twinx()
last_fit_key = list(f_r["avg_fidelities"].keys())[-1]
ax2.scatter(freqs, f_r["avg_fidelities"][last_fit_key],
color='c',
label= "{} fidelity".format(states),
marker='.')
ax2.set_ylabel("Fidelity")
label = "f_RO_{} = {:.5f} GHz".format(states,
params['f_RO'] * 1e-9)
ax.plot([],[], label=label)
fig, ax3 = plt.subplots()
for sigma, avg_fid in f_r['avg_fidelities'].items():
ax3.plot(self.proc_data_dict['plot_frequency'][0],
avg_fid, label=sigma)
ax3.plot([f_r["optimal_frequency"][-1]],
[f_r["optimal_frequency"][-1]], "k--")
t_f = self.options_dict.get('qutrit_fit_options', dict())
ax3.set_ylim([0.9, 1])
elif len(states) == 2:
c = "r--"
c2 = "k--"
ax.plot(freqs, f_r[0].eval(f_r[0].params, f=freqs),
c, label=label.format(states[0], "fit"),
linewidth=1.5)
ax.plot(freqs, f_r[1].eval(f_r[1].params, f=freqs),
c2, label=label.format(states[1], "fit"),
linewidth=1.5)
ax.plot([params['f_RO'], params['f_RO']],
[0, np.max(np.abs(np.asarray(list(self.proc_data_dict['spectra'].values()))))],
'r--', linewidth=2)
params_str = 'states: {}' \
'\n kappa = {:.3f} MHz\n J = {:.3f} MHz' \
'\n chi = {:.3f} MHz\n f_pf = {:.3f} GHz' \
'\n f_rr $|{}\\rangle$ = {:.3f} GHz' \
'\n f_rr $|{}\\rangle$ = {:.3f} GHz' \
'\n f_RO = {:.3f} GHz'.format(
states,
f_r[0].params['kappa_pf'].value * 1e-6,
f_r[0].params['J'].value * 1e-6,
(f_r[1].params['omega_ro'].value -
f_r[0].params['omega_ro'].value) / 2 * 1e-6,
f_r[0].params['omega_pf'].value * 1e-9,
states[0], f_r[0].params['omega_ro'].value * 1e-9,
states[1], f_r[1].params['omega_ro'].value * 1e-9,
params['f_RO'] * 1e-9)
ax.plot([],[], 'w', label=params_str)
ax.legend(loc='upper left', bbox_to_anchor=[1.1, 1])
def plot_difference_iq_plane(self, fig=None):
spectrums = self.proc_data_dict['spectra']
all_freqs = self.proc_data_dict['plot_frequency']
freqs = all_freqs if np.ndim(all_freqs) == 1 else all_freqs[0]
total_dist = np.abs(spectrums['e'] - spectrums['g']) + \
np.abs(spectrums['f'] - spectrums['g']) + \
np.abs(spectrums['f'] - spectrums['e'])
fmax = freqs[np.argmax(total_dist)]
# FIXME: just as debug plotting for now
if fig is None:
fig, ax = plt.subplots(2, figsize=(10,14))
else:
ax = fig.get_axes()
ax[0].plot(freqs, np.abs(spectrums['g']), label='g')
ax[0].plot(freqs, np.abs(spectrums['e']), label='e')
ax[0].plot(freqs, np.abs(spectrums['f']), label='f')
ax[0].set_ylabel('Amplitude')
ax[0].legend()
ax[1].plot(freqs, np.abs(spectrums['e'] - spectrums['g']), label='eg')
ax[1].plot(freqs, np.abs(spectrums['f'] - spectrums['g']), label='fg')
ax[1].plot(freqs, np.abs(spectrums['e'] - spectrums['f']), label='ef')
ax[1].plot(freqs, total_dist, label='total distance')
ax[1].set_xlabel("Freq. [Hz]")
ax[1].set_ylabel('Distance in IQ plane')
ax[0].set_title(f"Max Diff Freq: {fmax*1e-9} GHz")
ax[1].legend(loc=[1.01, 0])
return fig
    def plot_gaussian_overlap(self, fig=None):
        """Plot the expected qutrit assignment fidelity versus frequency.

        Only draws when exactly three states were measured; otherwise the
        (possibly ``None``) input figure is returned unchanged.

        Args:
            fig: optional existing figure (with two axes) to draw into; a
                new one is created when omitted.
        Returns:
            The figure that was drawn into, or ``fig`` as passed in when
            fewer/more than three states are present.
        """
        states = list(self.proc_data_dict['spectra'])
        all_freqs = self.proc_data_dict['plot_frequency']
        # plot_frequency may be one axis or one axis per state
        freqs = all_freqs if np.ndim(all_freqs) == 1 else all_freqs[0]
        if len(states) == 3:
            # NOTE(review): find_f_RO is recomputed here although
            # run_fitting may already have done so -- presumably idempotent.
            f_RO_qutrit = self.find_f_RO(states)
            f_r = self.proc_data_dict["fit_raw_results"]["".join(states)]
            if fig is None:
                fig, ax = plt.subplots(2, figsize=(10,14))
            else:
                ax = fig.get_axes()
            # vertical marker at the optimal readout frequency
            ax[0].plot([f_RO_qutrit, f_RO_qutrit],
                       [0, 1],
                       'm--', linewidth=1.5, label="F_RO_{}"
                       .format(states))
            # fidelity curve for the last (smallest-smoothing) sigma entry
            last_fit_key = list(f_r["avg_fidelities"].keys())[-1]
            ax[0].scatter(freqs, f_r["avg_fidelities"][last_fit_key],
                          color='c',
                          label="{} fidelity".format(states),
                          marker='.')
            ax[0].set_ylabel("Expected Fidelity")
            label = "f_RO_{} = {:.6f} GHz".format(states,
                                                  f_RO_qutrit * 1e-9)
            ax[0].plot([], [], label=label)
            ax[0].legend()
            # lower axis: fidelity curves for every smoothing sigma
            for sigma, avg_fid in f_r['avg_fidelities'].items():
                ax[1].plot(self.proc_data_dict['plot_frequency'][0],
                           avg_fid, label=sigma)
            ax[1].axvline(f_r["optimal_frequency"][-1],linestyle="--", )
            #ax.set_ylim([0.9, 1])
        return fig
def plot_max_area(self, fig=None):
spectrums = self.proc_data_dict['spectra']
states = list(self.proc_data_dict['spectra'])
all_freqs = self.proc_data_dict['plot_frequency']
freqs = all_freqs if np.ndim(all_freqs) == 1 else all_freqs[0]
if len(states) == 3:
# Area of triangle in IQ plane using Heron formula
s1, s2, s3 = np.abs(spectrums['e'] - spectrums['g']), \
np.abs(spectrums['f'] - spectrums['g']),\
np.abs(spectrums['f'] - spectrums['e'])
s = (s1 + s2 + s3)/2
qutrit_triangle_area = np.sqrt(s * (s - s1) * (s - s2) * (s - s3))
f_max_area = freqs[np.argmax(qutrit_triangle_area)]
if fig is None:
fig, ax = plt.subplots(1, figsize=(14, 8))
else:
ax = fig.get_axes()
ax.plot([f_max_area, f_max_area],
[0, np.max(qutrit_triangle_area)],
'm--', linewidth=1.5, label="F_RO_{}"
.format(states))
ax.scatter(freqs, qutrit_triangle_area,
label="{} area in IQ".format(states))
ax.set_ylabel("qutrit area in IQ")
ax.set_xlabel("Frequency (Hz)")
ax.set_title( "f_RO_{}_area = {:.6f} GHz".format(states,
f_max_area * 1e-9))
return fig
# ax.set_ylim([0.9, 1])
def plot(self, key_list=None, axs_dict=None, presentation_mode=None, no_label=False):
super(ResonatorSpectroscopy_v2, self).plot(key_list=key_list,
axs_dict=axs_dict,
presentation_mode=presentation_mode)
if self.do_fitting:
self.plot_fitting()
class Spectroscopy(ba.BaseDataAnalysis):
    """ A baseclass for spectroscopic measurements.
    Supports analyzing data from 2d sweeps and also combining data from multiple
    timestamps.
    Args:
        t_start, t_stop, options_dict, label, extract_only, do_fitting:
            See docstring of `BaseDataAnalysis`.
        auto: bool
            Run the analysis as the last step of initialization.
    Parameters used from the options_dict:
        param_2d: A path to a parameter in the hdf5 file that is interpreted
            as the second sweep dimension in case the sweep is split into
            multiple 1d sweep files. Optional.
    Parameters used either from metadata or options_dict:
        calc_pca: Whether to calculate the principal component of the spectrum,
            combining amplitude and phase. Default False.
        global_pca: If calculating the principal component, whether to do it
            globally or per-second-sweep-dimension-point. Default False.
    Plotting related parameters either from metadata or options_dict:
        plot_lines: Whether to do line plots. Defaults to True if nr of 2d
            sweep points is smaller than 4, False otherwise.
        plot_color: Whether to do 2d colour-plots. Defaults to True if nr of
            2d sweep points is larger than 3, False otherwise.
        plot_amp: Whether to plot transmission amplitude. Default True.
        plot_phase: Whether to plot transmission phase. Default True.
        plot_pca: Whether to plot principal component of the spectrum.
            Default False.
        label_1d: Label for the first sweep dimension. Default 'Frequency'.
        unit_1d: Unit for the first sweep dimension. Default 'Hz'.
        label_2d: Label for the second sweep dimension. Defaults to the
            second sweep parameter name, or '2D index' if there is none.
        unit_2d: Unit for the second sweep dimension. Defaults to the second
            sweep parameter unit, or '' if there is none.
        label_amp: Label for the amplitude output. Default 'Amplitude'.
        unit_amp: Unit for the amplitude output. Default 'V'.
        range_amp: Range for the amplitude output. Default min-to-max.
        label_phase: Label for the phase output. Default 'Phase'.
        unit_phase: Unit for the phase output. Default 'deg'.
        range_phase: Range for the phase output. Default min-to-max.
        label_pca: Label for the principal component output.
            Default 'Principal component'.
        unit_pca: Unit for the principal component output. Default 'V'.
        range_pca: Range for the principal component output. Default min-to-max.
    """
    def __init__(self, t_start: str = None,
                 t_stop: str = None,
                 options_dict: dict = None,
                 label: str = None,
                 extract_only: bool = False,
                 auto: bool = True,
                 do_fitting: bool = False):
        if options_dict is None:
            options_dict = {}
        super().__init__(t_start=t_start, t_stop=t_stop,
                         options_dict=options_dict,
                         label=label,
                         extract_only=extract_only,
                         do_fitting=do_fitting)
        self.params_dict = {'measurementstring': 'measurementstring'}
        # optional hdf5 path of the parameter used as second sweep dimension
        self.param_2d = options_dict.get('param_2d', None)
        if self.param_2d is not None:
            pname = 'Instrument settings.' + self.param_2d
            self.params_dict.update({'param_2d': pname})
            self.numeric_params = ['param_2d']
        if auto:
            self.run_analysis()
    def process_data(self):
        """Collect spectra from all raw data dicts and optionally apply PCA.

        Populates proc_data_dict with 'freqs', 'amps', 'phases',
        'values_2d', 'label_2d', 'unit_2d', 'ts_string' and, if calc_pca is
        set, 'pcas'.
        """
        pdd = self.proc_data_dict
        rdds = self.raw_data_dict
        # normalize to a sequence of raw data dicts (one per timestamp)
        if not isinstance(self.raw_data_dict, (tuple, list)):
            rdds = (rdds,)
        pdd['freqs'] = [] # list of lists of floats
        pdd['amps'] = [] # list of lists of floats
        pdd['phases'] = [] # list of lists of floats
        pdd['values_2d'] = [] # list of floats
        for rdd in rdds:
            f, a, p, v = self._process_spec_rdd(rdd)
            pdd['freqs'] += f
            pdd['amps'] += a
            pdd['phases'] += p
            pdd['values_2d'] += v
        # replace missing 2d values by sequential indices
        next_idx = 0
        for i in range(len(pdd['values_2d'])):
            if pdd['values_2d'][i] is None:
                pdd['values_2d'][i] = next_idx
                next_idx += 1
        # second-dimension label/unit default to the second sweep parameter
        spn = rdds[0]['sweep_parameter_names']
        pdd['label_2d'] = '2D index' if isinstance(spn, str) else spn[1]
        pdd['label_2d'] = self.get_param_value('name_2d', pdd['label_2d'])
        spu = rdds[0]['sweep_parameter_units']
        pdd['unit_2d'] = '' if isinstance(spu, str) else spu[1]
        pdd['unit_2d'] = self.get_param_value('unit_2d', pdd['unit_2d'])
        pdd['ts_string'] = self.timestamps[0]
        if len(self.timestamps) > 1:
            pdd['ts_string'] = pdd['ts_string'] + ' to ' + self.timestamps[-1]
        if self.get_param_value('calc_pca', False):
            if self.get_param_value('global_pca', False):
                # find global transformation
                amp = np.array([a for amps in pdd['amps'] for a in amps])
                phase = np.array([p for ps in pdd['phases'] for p in ps])
                _, pca_basis = self._transform_pca(amp, phase)
                # apply found transform to data
                pdd['pcas'] = []
                for amp, phase in zip(pdd['amps'], pdd['phases']):
                    pca, _ = self._transform_pca(amp, phase, basis=pca_basis)
                    pdd['pcas'].append(pca)
                # subtract offset and fix sign
                pca = np.array([p for pcas in pdd['pcas'] for p in pcas])
                median = np.median(pca)
                sign = np.sign(pca[np.argmax(np.abs(pca - median))])
                for i in range(len(pdd['pcas'])):
                    pdd['pcas'][i] = sign * (pdd['pcas'][i] - median)
            else:
                # independent PCA for each second-sweep-dimension point
                pdd['pcas'] = []
                for amp, phase in zip(pdd['amps'], pdd['phases']):
                    pca, _ = self._transform_pca(amp, phase)
                    pdd['pcas'].append(pca)
    @staticmethod
    def _transform_pca(amp, phase, basis=None):
        """Project amplitude/phase data onto its principal component.

        Args:
            amp: array of transmission amplitudes.
            phase: array of transmission phases in degrees.
            basis: optional 2x2 basis to use instead of computing one
                from the data.
        Returns:
            Tuple ``(pca, pca_basis)`` of the projected 1d data and the
            basis that was used.
        """
        # convert polar (amp, phase in deg) to cartesian IQ coordinates
        i = amp * np.cos(np.pi * phase / 180)
        q = amp * np.sin(np.pi * phase / 180)
        pca = np.array([i, q]).T
        if basis is None:
            pca -= pca.mean(axis=0)
            pca_basis = np.linalg.eigh(pca.T @ pca)[1]
        else:
            pca_basis = basis
        # eigh sorts eigenvalues ascending, so row 1 of the projection is
        # the dominant component
        pca = (pca_basis @ pca.T)[1]
        if basis is None:
            # center on the median and orient the largest excursion upwards
            pca -= np.median(pca)
            pca *= np.sign(pca[np.argmax(np.abs(pca))])
        return pca, pca_basis
    @staticmethod
    def _process_spec_rdd(rdd):
        """Extract per-sweep freqs, amps, phases and 2d values from one rdd.

        Returns:
            Tuple ``(f, a, p, v)`` of lists with one entry per 2d sweep
            point; for a 1d sweep each list has a single entry and ``v``
            holds the optional 'param_2d' value (or None).
        """
        if 'soft_sweep_points' in rdd:
            # 2D sweep
            v = list(rdd['soft_sweep_points'])
            f = len(v) * [rdd['hard_sweep_points']]
            a = list(rdd['measured_data']['Magn'].T)
            p = list(rdd['measured_data']['Phase'].T)
        else:
            # 1D sweep
            v = [rdd.get('param_2d', None)]
            f = [rdd['hard_sweep_points']]
            a = [rdd['measured_data']['Magn']]
            p = [rdd['measured_data']['Phase']]
        return f, a, p, v
    def prepare_plots(self):
        """Build plot_dicts for line and/or colour plots of the spectra."""
        pdd = self.proc_data_dict
        rdd = self.raw_data_dict
        if isinstance(rdd, (tuple, list)):
            rdd = rdd[0]
        def calc_range(values):
            # overall (min, max) over a list of arrays
            return (min([np.min(x) for x in values]),
                    max([np.max(x) for x in values]))
        plot_lines = self.get_param_value('plot_lines', len(pdd['amps']) <= 3)
        plot_color = self.get_param_value('plot_color', len(pdd['amps']) > 3)
        plot_amp = self.get_param_value('plot_amp', True)
        plot_phase = self.get_param_value('plot_phase', True)
        plot_pca = self.get_param_value('plot_pca',
                                        self.get_param_value('calc_pca', False))
        label1 = self.get_param_value('label_1d', 'Frequency')
        unit1 = self.get_param_value('unit_1d', 'Hz')
        label2 = self.get_param_value('label_2d', pdd['label_2d'])
        unit2 = self.get_param_value('unit_2d', pdd['unit_2d'])
        label_amp = self.get_param_value('label_amp', 'Amplitude')
        unit_amp = self.get_param_value('unit_amp', 'V')
        range_amp = self.get_param_value('range_amp', calc_range(pdd['amps']))
        label_phase = self.get_param_value('label_phase', 'Phase')
        unit_phase = self.get_param_value('unit_phase', 'deg')
        range_phase = self.get_param_value('range_phase',
                                           calc_range(pdd['phases']))
        label_pca = self.get_param_value('label_pca', 'Principal component')
        unit_pca = self.get_param_value('unit_pca', 'V')
        range_pca = calc_range(pdd['pcas']) if 'pcas' in pdd else (0, 1)
        range_pca = self.get_param_value('range_pca', range_pca)
        fig_title_suffix = ' ' + rdd['measurementstring'] + '\n' + \
                           pdd['ts_string']
        if plot_lines:
            for enable, param, plot_name, ylabel, yunit, yrange in [
                (plot_amp, 'amps', 'amp_1d', label_amp, unit_amp,
                 range_amp),
                (plot_phase, 'phases', 'phase_1d', label_phase, unit_phase,
                 range_phase),
                (plot_pca, 'pcas', 'pca_1d', label_pca, unit_pca,
                 range_pca),
            ]:
                if enable:
                    self.plot_dicts[plot_name] = {
                        'fig_id': plot_name,
                        'plotfn': self.plot_line,
                        'xvals': pdd['freqs'],
                        'yvals': pdd[param],
                        'xlabel': label1,
                        'xunit': unit1,
                        'ylabel': ylabel,
                        'yunit': yunit,
                        'yrange': yrange,
                        'title': plot_name + fig_title_suffix,
                    }
        if plot_color:
            for enable, param, plot_name, zlabel, zunit, zrange in [
                (plot_amp, 'amps', 'amp_2d', label_amp, unit_amp,
                 range_amp),
                (plot_phase, 'phases', 'phase_2d', label_phase, unit_phase,
                 range_phase),
                (plot_pca, 'pcas', 'pca_2d', label_pca, unit_pca,
                 range_pca),
            ]:
                if enable:
                    self.plot_dicts[plot_name] = {
                        'fig_id': plot_name,
                        'plotfn': self.plot_colorx,
                        'xvals': pdd['values_2d'],
                        'yvals': pdd['freqs'],
                        'zvals': pdd[param],
                        'zrange': zrange,
                        'xlabel': label2,
                        'xunit': unit2,
                        'ylabel': label1,
                        'yunit': unit1,
                        'clabel': f'{zlabel} ({zunit})',
                        'title': plot_name + fig_title_suffix,
                    }
class QubitTrackerSpectroscopy(Spectroscopy):
    """A class for peak-tracking 2d spectroscopy.
    Fits the spectroscopy data to a Gaussian model and can extrapolate a
    polynomial model of the peak frequency as a function of the second sweep
    parameter to guess a frequency range for the next sweep.
    Args: Same as for `Spectroscopy`.
    Parameters used from the options_dict: Same as for `Spectroscopy`.
    Parameters used either from metadata or options_dict:
        calc_pca: Hard-coded to True, as the amplitude-phase data needs to be
            reduced for fitting.
        global_pca: Whether to do principal component analysis globally or
            per-second-sweep-dimension-point. Default False.
        tracker_fit_order: Polynomial order for extrapolating the measurement
            range. Default 1.
        tracker_fit_points: Number of 2d sweep points to use for the polynomial
            fit. The points are taken evenly from the entire range. Default 4.
    Plotting related parameters either from metadata or options_dict:
        Same as for `Spectroscopy`.
    """
    def __init__(self, t_start: str = None,
                 t_stop: str = None,
                 options_dict: dict = None,
                 label: str = None,
                 extract_only: bool = False,
                 auto: bool = True,
                 do_fitting: bool = True):
        if options_dict is None:
            options_dict = {}
        # PCA is mandatory here: the Gaussian fit runs on the 1d principal
        # component of the amplitude/phase data
        options_dict['calc_pca'] = True
        super().__init__(t_start=t_start, t_stop=t_stop,
                         options_dict=options_dict, label=label,
                         extract_only=extract_only, auto=auto,
                         do_fitting=do_fitting)
    def prepare_fitting(self):
        """Set up Gaussian fits on an evenly spaced subset of 2d points.

        Raises:
            ValueError: if tracker_fit_points is too small for the requested
                polynomial order.
        """
        super().prepare_fitting()
        pdd = self.proc_data_dict
        fit_order = self.get_param_value('tracker_fit_order', 1)
        fit_pts = self.get_param_value('tracker_fit_points', 4)
        if fit_pts < fit_order + 1:
            raise ValueError(f"Can't fit {fit_pts} points to order {fit_order} "
                             "polynomial")
        # evenly spaced indices over the 2d sweep; astype(int) instead of the
        # deprecated np.int alias (removed in NumPy >= 1.24)
        idxs = np.round(
            np.linspace(0, len(pdd['pcas']) - 1, fit_pts)).astype(int)
        pdd['fit_idxs'] = idxs
        model = fit_mods.GaussianModel
        model.guess = fit_mods.Gaussian_guess.__get__(model, model.__class__)
        for i in idxs:
            self.fit_dicts[f'tracker_fit_{i}'] = {
                'model': model,
                'fit_xvals': {'freq': pdd['freqs'][i]},
                'fit_yvals': {'data': pdd['pcas'][i]},
            }
    def analyze_fit_results(self):
        """Fit a polynomial through the per-point Gaussian peak positions."""
        super().analyze_fit_results()
        pdd = self.proc_data_dict
        fit_order = self.get_param_value('tracker_fit_order', 1)
        model = lmfit.models.PolynomialModel(degree=fit_order)
        xpoints = [pdd['values_2d'][i] for i in pdd['fit_idxs']]
        # 'mu' is the fitted Gaussian center, i.e. the peak frequency
        ypoints = [self.fit_res[f'tracker_fit_{i}'].best_values['mu']
                   for i in pdd['fit_idxs']]
        self.fit_dicts['tracker_fit'] = {
            'model': model,
            'fit_xvals': {'x': xpoints},
            'fit_yvals': {'data': ypoints},
        }
        self.run_fitting()
        self.save_fit_results()
    def prepare_plots(self):
        """Overlay the fitted peak positions and polynomial on the 2d plot."""
        super().prepare_plots()
        pdd = self.proc_data_dict
        plot_color = self.get_param_value('plot_color', len(pdd['amps']) > 3)
        if self.do_fitting and plot_color:
            xpoints = [pdd['values_2d'][i] for i in pdd['fit_idxs']]
            ypoints = [self.fit_res[f'tracker_fit_{i}'].best_values['mu']
                       for i in pdd['fit_idxs']]
            # fitted peak positions as red markers
            self.plot_dicts['pca_2d_fit1'] = {
                'fig_id': 'pca_2d',
                'plotfn': self.plot_line,
                'xvals': xpoints,
                'yvals': ypoints,
                'marker': 'o',
                'linestyle': '',
                'color': 'red',
            }
            # polynomial model evaluated on a dense grid as a green line
            xpoints = np.linspace(min(xpoints), max(xpoints), 101)
            fr = self.fit_res['tracker_fit']  # was f'tracker_fit' (no field)
            ypoints = fr.model.func(xpoints, **fr.best_values)
            self.plot_dicts['pca_2d_fit2'] = {
                'fig_id': 'pca_2d',
                'plotfn': self.plot_line,
                'xvals': xpoints,
                'yvals': ypoints,
                'marker': '',
                'linestyle': '-',
                'color': 'green',
            }
    def next_round_limits(self, freq_slack=0):
        """Calculate 2d-parameter and frequency ranges for next tracker sweep.
        The 2d parameter range is calculated that it spans the same range as
        the current sweep, but starts one mean step-size after the current
        sweep.
        The frequency range is calculated such that the extrapolated polynomial
        fits inside the range within the 2d parameter range, with some optional
        extra margin that is passed as an argument.
        Args:
            freq_slack: float
                Extra frequency margin for the output frequency range. The
                output range is extended by this value on each side.
                Default 0.
        Returns:
            v2d_next: (float, float)
                Range for the 2d sweep parameter for the next sweep.
            f_next: (float, float)
                Range for the frequency sweep for the next sweep.
        Raises:
            KeyError: if the tracker fit has not been run yet.
        """
        if 'tracker_fit' not in self.fit_res:
            raise KeyError('Tracker fit not yet run.')
        pdd = self.proc_data_dict
        fr = self.fit_res['tracker_fit']
        v2d = pdd['values_2d']
        # shift the current range by its full span plus one mean step
        v2d_next = (v2d[-1] + (v2d[-1] - v2d[0])/(len(v2d)-1),
                    2*v2d[-1] - v2d[0] + (v2d[-1] - v2d[0])/(len(v2d)-1))
        # evaluate the extrapolated polynomial over the next range to bound
        # the expected peak frequencies
        x = np.linspace(v2d_next[0], v2d_next[1], 101)
        y = fr.model.func(x, **fr.best_values)
        f_next = (y.min() - freq_slack, y.max() + freq_slack)
        return v2d_next, f_next
| QudevETH/PycQED_py3 | pycqed/analysis_v2/spectroscopy_analysis.py | Python | mit | 81,833 | [
"Gaussian"
] | ad5214c357453dc63c6ec17afbc5513882a7a6c13d82e005a4099093149bbf8c |
#-*- coding: utf-8 -*-
import modules.mapcss_lib as mapcss
import regex as re # noqa
from plugins.Plugin import with_options # noqa
from plugins.PluginMapCSS import PluginMapCSS
class Josm_deprecated(PluginMapCSS):
MAPCSS_URL = 'https://josm.openstreetmap.de/browser/josm/trunk/resources/data/validator/deprecated.mapcss'
    def init(self, logger):
        """Register error classes and precompile regular expressions.

        This method (like the rest of the class) is auto-generated from the
        JOSM validator rule file referenced by ``MAPCSS_URL``; do not edit
        the generated entries by hand.
        """
        super().init(logger)
        tags = capture_tags = {} # noqa
        # error class definitions generated from the MAPCSS "throwWarning"
        # messages (item 9002 = deprecated tagging)
        self.errors[9002001] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('deprecated tagging'))
        self.errors[9002002] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('\'\'{0}\'\' is meaningless, use more specific tags, e.g. \'\'{1}\'\'', 'access=designated', 'bicycle=designated'))
        self.errors[9002003] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('\'\'{0}\'\' does not specify the official mode of transportation, use \'\'{1}\'\' for example', 'access=official', 'bicycle=official'))
        self.errors[9002004] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('{0}={1} is unspecific. Instead of \'\'{1}\'\' please give more information about what exactly should be fixed.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.value}')))
        self.errors[9002005] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('Wrong usage of {0} tag. Remove {1}, because it is clear that the name is missing even without an additional tag.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')))
        self.errors[9002006] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('{0} is unspecific. Instead use the key fixme with the information what exactly should be fixed in the value of fixme.', mapcss._tag_uncapture(capture_tags, '{0.tag}')))
        self.errors[9002007] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('{0}={1} is unspecific. Please replace \'\'{1}\'\' by a specific value.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.value}')))
        self.errors[9002008] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('{0} should be replaced with {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}')))
        self.errors[9002009] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('{0} = {1}; remove {0}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{1.value}')))
        self.errors[9002010] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('Unspecific tag {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')))
        self.errors[9002011] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('key with uncommon character'))
        self.errors[9002012] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('uncommon short key'))
        self.errors[9002013] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:bitcoin=yes', 'payment:litecoin=yes'))
        self.errors[9002014] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('questionable key (ending with a number)'))
        self.errors[9002016] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('{0} is not recommended. Use the Reverse Ways function from the Tools menu.', mapcss._tag_uncapture(capture_tags, '{0.tag}')))
        self.errors[9002017] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('The key {0} has an uncommon value.', mapcss._tag_uncapture(capture_tags, '{1.key}')))
        self.errors[9002018] = self.def_class(item = 9002, level = 2, tags = ["tag", "deprecated"], title = mapcss.tr('misspelled value'))
        self.errors[9002019] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('wrong value: {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')))
        self.errors[9002020] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('unusual value of {0}', mapcss._tag_uncapture(capture_tags, '{0.key}')))
        self.errors[9002021] = self.def_class(item = 9002, level = 3, tags = ["tag", "deprecated"], title = mapcss.tr('{0} is unspecific', mapcss._tag_uncapture(capture_tags, '{0.tag}')))
        # precompiled regular expressions used by the generated rule checks;
        # names encode a hash of the pattern
        self.re_01eb1711 = re.compile(r'^(yes|both|no)$')
        self.re_047d5648 = re.compile(r'^(1|2|3|4|5|grade1|grade2|grade3|grade4|grade5)$')
        self.re_0c5b5730 = re.compile(r'color:')
        self.re_0f294fdf = re.compile(r'^[1-9][0-9]*$')
        self.re_1f92073a = re.compile(r'^(?i)fixme$')
        self.re_24dfeb95 = re.compile(r'^(tower|pole|insulator|portal|terminal)$')
        self.re_27210286 = re.compile(r'^.$')
        self.re_2f881233 = re.compile(r'^(?i)(bbq)$')
        self.re_2fd4cdcf = re.compile(r'^(crossover|siding|spur|yard)$')
        self.re_300dfa36 = re.compile(r'^[^t][^i][^g].+_[0-9]$')
        self.re_3185ac6d = re.compile(r'^note_[0-9]$')
        self.re_340a2b31 = re.compile(r'(?i)(;bbq|bbq;)')
        self.re_34c15d62 = re.compile(r'^..$')
        self.re_493fd1a6 = re.compile(r'^is_in:.*$')
        self.re_51df498f = re.compile(r'^(alley|drive-through|drive_through|driveway|emergency_access|parking_aisle|rest_area|slipway|yes)$')
        self.re_554de4c7 = re.compile(r':color')
        self.re_5ee0acf2 = re.compile(r'josm\/ignore')
        self.re_6029fe03 = re.compile(r'^diaper:')
        self.re_61b0be1b = re.compile(r'^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$')
        self.re_620f4d52 = re.compile(r'=|\+|\/|&|<|>|;|\'|\"|%|#|@|\\|,|\.|\{|\}|\?|\*|\^|\$')
        self.re_6d27b157 = re.compile(r'^description_[0-9]$')
        self.re_787405b1 = re.compile(r'^(yes|no|limited)$')
        self.re_7a045a17 = re.compile(r'^(irrigation|transportation|water_power)$')
        self.re_7d409ed5 = re.compile(r'(?i)(_bbq)')
def node(self, data, tags):
capture_tags = {}
keys = tags.keys()
err = []
set_bbq_autofix = set_diaper___checked = set_diaper_checked = set_generic_power_tower_type_warning = set_power_pole_type_warning = set_power_tower_type_warning = set_samecolor = False
# *[barrier=wire_fence]
if ('barrier' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wire_fence'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=fence + fence_type=chain_link"
# fixAdd:"barrier=fence"
# fixAdd:"fence_type=chain_link"
err.append({'class': 9002001, 'subclass': 1107799632, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','fence'],
['fence_type','chain_link']])
}})
# *[barrier=wood_fence]
if ('barrier' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wood_fence'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=fence + fence_type=wood"
# fixAdd:"barrier=fence"
# fixAdd:"fence_type=wood"
err.append({'class': 9002001, 'subclass': 1412230714, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','fence'],
['fence_type','wood']])
}})
# node[highway=ford]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'ford'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"ford=yes"
# fixAdd:"ford=yes"
# fixRemove:"highway"
err.append({'class': 9002001, 'subclass': 1317841090, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['ford','yes']]),
'-': ([
'highway'])
}})
# *[highway=stile]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'stile'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=stile"
# fixAdd:"barrier=stile"
# fixRemove:"highway"
err.append({'class': 9002001, 'subclass': 1435678043, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','stile']]),
'-': ([
'highway'])
}})
# *[highway=incline]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'incline'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"incline"
err.append({'class': 9002001, 'subclass': 765169083, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[highway=incline_steep]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'incline_steep'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"incline"
err.append({'class': 9002001, 'subclass': 1966772390, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[highway=unsurfaced]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'unsurfaced'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"highway=* + surface=unpaved"
# fixAdd:"highway=road"
# fixAdd:"surface=unpaved"
err.append({'class': 9002001, 'subclass': 20631498, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['highway','road'],
['surface','unpaved']])
}})
# *[landuse=wood]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'wood'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=forest"
# suggestAlternative:"natural=wood"
err.append({'class': 9002001, 'subclass': 469903103, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=marsh]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'marsh'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=wetland + wetland=marsh"
# fixAdd:"natural=wetland"
# fixAdd:"wetland=marsh"
err.append({'class': 9002001, 'subclass': 1459865523, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','wetland'],
['wetland','marsh']])
}})
# *[highway=byway]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'byway'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
err.append({'class': 9002001, 'subclass': 1844620979, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[power_source]
if ('power_source' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power_source'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"generator:source"
err.append({'class': 9002001, 'subclass': 34751027, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[power_rating]
if ('power_rating' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power_rating'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"generator:output"
err.append({'class': 9002001, 'subclass': 904750343, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[shop=antique]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'antique'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=antiques"
# fixAdd:"shop=antiques"
err.append({'class': 9002001, 'subclass': 596668979, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','antiques']])
}})
# *[shop=bags]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'bags'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=bag"
# fixAdd:"shop=bag"
err.append({'class': 9002001, 'subclass': 1709003584, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','bag']])
}})
# *[shop=fashion]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fashion'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=clothes"
# fixAdd:"shop=clothes"
err.append({'class': 9002001, 'subclass': 985619804, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','clothes']])
}})
# *[shop=organic]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'organic'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=* + organic=only"
# suggestAlternative:"shop=* + organic=yes"
err.append({'class': 9002001, 'subclass': 1959365145, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=pets]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'pets'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=pet"
# fixAdd:"shop=pet"
err.append({'class': 9002001, 'subclass': 290270098, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','pet']])
}})
# *[shop=pharmacy]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'pharmacy'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=pharmacy"
# fixChangeKey:"shop => amenity"
err.append({'class': 9002001, 'subclass': 350722657, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'shop')]]),
'-': ([
'shop'])
}})
# *[bicycle_parking=sheffield]
if ('bicycle_parking' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bicycle_parking') == mapcss._value_capture(capture_tags, 0, 'sheffield'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bicycle_parking=stands"
# fixAdd:"bicycle_parking=stands"
err.append({'class': 9002001, 'subclass': 718874663, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bicycle_parking','stands']])
}})
# *[amenity=emergency_phone]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'emergency_phone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"emergency=phone"
# fixRemove:"amenity"
# fixAdd:"emergency=phone"
err.append({'class': 9002001, 'subclass': 1108230656, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency','phone']]),
'-': ([
'amenity'])
}})
# *[sport=gaelic_football]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'gaelic_football'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=gaelic_games"
# fixAdd:"sport=gaelic_games"
err.append({'class': 9002001, 'subclass': 1768681881, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['sport','gaelic_games']])
}})
# *[power=station]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"power=plant"
# suggestAlternative:"power=substation"
err.append({'class': 9002001, 'subclass': 52025933, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[power=sub_station]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'sub_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"power=substation"
# fixAdd:"power=substation"
err.append({'class': 9002001, 'subclass': 1423074682, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['power','substation']])
}})
# *[location=rooftop]
if ('location' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'location') == mapcss._value_capture(capture_tags, 0, 'rooftop'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location=roof"
# fixAdd:"location=roof"
err.append({'class': 9002001, 'subclass': 1028577225, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location','roof']])
}})
# *[generator:location]
if ('generator:location' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:location'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"location"
# fixChangeKey:"generator:location => location"
err.append({'class': 9002001, 'subclass': 900615917, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location', mapcss.tag(tags, 'generator:location')]]),
'-': ([
'generator:location'])
}})
# *[generator:method=dam]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'dam'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-storage"
# fixAdd:"generator:method=water-storage"
err.append({'class': 9002001, 'subclass': 248819368, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-storage']])
}})
# *[generator:method=pumped-storage]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'pumped-storage'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-pumped-storage"
# fixAdd:"generator:method=water-pumped-storage"
err.append({'class': 9002001, 'subclass': 93454158, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-pumped-storage']])
}})
# *[generator:method=pumping]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'pumping'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-pumped-storage"
# fixAdd:"generator:method=water-pumped-storage"
err.append({'class': 9002001, 'subclass': 2115673716, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-pumped-storage']])
}})
# *[fence_type=chain]
if ('fence_type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fence_type') == mapcss._value_capture(capture_tags, 0, 'chain'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=chain"
# suggestAlternative:"barrier=fence + fence_type=chain_link"
err.append({'class': 9002001, 'subclass': 19409288, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[building=entrance]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'entrance'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"entrance"
err.append({'class': 9002001, 'subclass': 306662985, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[board_type=board]
if ('board_type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'board_type') == mapcss._value_capture(capture_tags, 0, 'board'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixRemove:"board_type"
err.append({'class': 9002001, 'subclass': 1150949316, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'board_type'])
}})
# *[man_made=measurement_station]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'measurement_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=monitoring_station"
# fixAdd:"man_made=monitoring_station"
err.append({'class': 9002001, 'subclass': 700465123, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','monitoring_station']])
}})
# *[measurement=water_level]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'water_level'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:water_level=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:water_level=yes"
err.append({'class': 9002001, 'subclass': 634647702, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:water_level','yes']]),
'-': ([
'measurement'])
}})
# *[measurement=weather]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'weather'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:weather=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:weather=yes"
err.append({'class': 9002001, 'subclass': 336627227, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:weather','yes']]),
'-': ([
'measurement'])
}})
# *[measurement=seismic]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'seismic'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:seismic_activity=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:seismic_activity=yes"
err.append({'class': 9002001, 'subclass': 1402131289, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:seismic_activity','yes']]),
'-': ([
'measurement'])
}})
# *[monitoring:river_level]
if ('monitoring:river_level' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'monitoring:river_level'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"monitoring:water_level"
# fixChangeKey:"monitoring:river_level => monitoring:water_level"
err.append({'class': 9002001, 'subclass': 264907924, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:water_level', mapcss.tag(tags, 'monitoring:river_level')]]),
'-': ([
'monitoring:river_level'])
}})
# *[stay]
if ('stay' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'stay'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"maxstay"
# fixChangeKey:"stay => maxstay"
err.append({'class': 9002001, 'subclass': 787370129, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['maxstay', mapcss.tag(tags, 'stay')]]),
'-': ([
'stay'])
}})
# *[emergency=aed]
if ('emergency' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'emergency') == mapcss._value_capture(capture_tags, 0, 'aed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"emergency=defibrillator"
# fixAdd:"emergency=defibrillator"
err.append({'class': 9002001, 'subclass': 707111885, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency','defibrillator']])
}})
# *[day_on][!restriction]
# *[day_off][!restriction]
# *[date_on][!restriction]
# *[date_off][!restriction]
# *[hour_on][!restriction]
# *[hour_off][!restriction]
if ('date_off' in keys) or ('date_on' in keys) or ('day_off' in keys) or ('day_on' in keys) or ('hour_off' in keys) or ('hour_on' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'day_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'day_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'date_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'date_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hour_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hour_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"*:conditional"
# assertMatch:"node day_on=0-12"
err.append({'class': 9002001, 'subclass': 294264920, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[access=designated]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'designated'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("''{0}'' is meaningless, use more specific tags, e.g. ''{1}''","access=designated","bicycle=designated")
err.append({'class': 9002002, 'subclass': 2057594338, 'text': mapcss.tr('\'\'{0}\'\' is meaningless, use more specific tags, e.g. \'\'{1}\'\'', 'access=designated', 'bicycle=designated')})
# *[access=official]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'official'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("''{0}'' does not specify the official mode of transportation, use ''{1}'' for example","access=official","bicycle=official")
err.append({'class': 9002003, 'subclass': 1909133836, 'text': mapcss.tr('\'\'{0}\'\' does not specify the official mode of transportation, use \'\'{1}\'\' for example', 'access=official', 'bicycle=official')})
# *[fixme=yes]
# *[FIXME=yes]
if ('FIXME' in keys) or ('fixme' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fixme') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'FIXME') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0}={1} is unspecific. Instead of ''{1}'' please give more information about what exactly should be fixed.","{0.key}","{0.value}")
err.append({'class': 9002004, 'subclass': 136657482, 'text': mapcss.tr('{0}={1} is unspecific. Instead of \'\'{1}\'\' please give more information about what exactly should be fixed.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
# *[name][name=~/^(?i)fixme$/]
if ('name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'name') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_1f92073a), mapcss._tag_capture(capture_tags, 1, tags, 'name')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("Wrong usage of {0} tag. Remove {1}, because it is clear that the name is missing even without an additional tag.","{0.key}","{0.tag}")
# fixRemove:"name"
# assertMatch:"node name=FIXME"
# assertMatch:"node name=Fixme"
# assertMatch:"node name=fixme"
# assertNoMatch:"node name=valid name"
err.append({'class': 9002005, 'subclass': 642340557, 'text': mapcss.tr('Wrong usage of {0} tag. Remove {1}, because it is clear that the name is missing even without an additional tag.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'name'])
}})
# *[note][note=~/^(?i)fixme$/]
if ('note' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'note') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_1f92073a), mapcss._tag_capture(capture_tags, 1, tags, 'note')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is unspecific. Instead use the key fixme with the information what exactly should be fixed in the value of fixme.","{0.tag}")
err.append({'class': 9002006, 'subclass': 1243120287, 'text': mapcss.tr('{0} is unspecific. Instead use the key fixme with the information what exactly should be fixed in the value of fixme.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[type=broad_leaved]
# *[type=broad_leafed]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'broad_leaved'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'broad_leafed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=broadleaved"
# fixAdd:"leaf_type=broadleaved"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 293968062, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','broadleaved']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[wood=coniferous]
# *[type=coniferous]
# *[type=conifer]
if ('type' in keys) or ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'coniferous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'coniferous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'conifer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=needleleaved"
# fixAdd:"leaf_type=needleleaved"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 50517650, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','needleleaved']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[wood=mixed]
if ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'mixed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=mixed"
# fixAdd:"leaf_type=mixed"
# fixRemove:"wood"
err.append({'class': 9002001, 'subclass': 235914603, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','mixed']]),
'-': ([
'wood'])
}})
# *[wood=evergreen]
# *[type=evergreen]
if ('type' in keys) or ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'evergreen'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'evergreen'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_cycle=evergreen"
# fixAdd:"leaf_cycle=evergreen"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 747964532, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_cycle','evergreen']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[type=deciduous]
# *[type=deciduos]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'deciduous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'deciduos'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_cycle=deciduous"
# fixAdd:"leaf_cycle=deciduous"
# fixRemove:"type"
err.append({'class': 9002001, 'subclass': 591116099, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_cycle','deciduous']]),
'-': ([
'type'])
}})
# *[wood=deciduous]
if ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'deciduous'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type + leaf_cycle"
err.append({'class': 9002001, 'subclass': 1100223594, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# node[type=palm]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'palm'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type"
# suggestAlternative:"species"
# suggestAlternative:"trees"
err.append({'class': 9002001, 'subclass': 1453672853, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=land]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'land'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Please use instead a multipolygon.","{0.tag}")
err.append({'class': 9002001, 'subclass': 94558529, 'text': mapcss.tr('{0} is deprecated. Please use instead a multipolygon.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=causeway]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'causeway'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=low_water_crossing"
# suggestAlternative:"embankment=yes"
# suggestAlternative:"ford=yes"
err.append({'class': 9002001, 'subclass': 461671124, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=swing]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'swing'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge:movable=swing"
# suggestAlternative:"bridge:structure=simple-suspension"
err.append({'class': 9002001, 'subclass': 1047428067, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=suspension]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'suspension'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=yes + bridge:structure=suspension"
# fixAdd:"bridge:structure=suspension"
# fixAdd:"bridge=yes"
err.append({'class': 9002001, 'subclass': 1157046268, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:structure','suspension'],
['bridge','yes']])
}})
# *[bridge=pontoon]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'pontoon'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=yes + bridge:structure=floating"
# fixAdd:"bridge:structure=floating"
# fixAdd:"bridge=yes"
err.append({'class': 9002001, 'subclass': 1195531951, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:structure','floating'],
['bridge','yes']])
}})
# *[fee=interval]
# *[lit=interval]
# *[supervised=interval]
if ('fee' in keys) or ('lit' in keys) or ('supervised' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fee') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lit') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'supervised') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Please specify interval by using opening_hours syntax","{0.tag}")
err.append({'class': 9002001, 'subclass': 417886592, 'text': mapcss.tr('{0} is deprecated. Please specify interval by using opening_hours syntax', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[/josm\/ignore/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_5ee0acf2))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwError:tr("{0} is deprecated. Please delete this object and use a private layer instead","{0.key}")
# fixDeleteObject:this
err.append({'class': 9002001, 'subclass': 1402743016, 'text': mapcss.tr('{0} is deprecated. Please delete this object and use a private layer instead', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[sport=diving]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'diving'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=cliff_diving"
# suggestAlternative:"sport=scuba_diving"
err.append({'class': 9002001, 'subclass': 590643159, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[parking=park_and_ride]
if ('parking' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'parking') == mapcss._value_capture(capture_tags, 0, 'park_and_ride'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=parking + park_ride=yes"
# fixAdd:"amenity=parking"
# fixAdd:"park_ride=yes"
# fixRemove:"parking"
err.append({'class': 9002001, 'subclass': 1893516041, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','parking'],
['park_ride','yes']]),
'-': ([
'parking'])
}})
# *[playground=yes]
# *[manhole=plain]
# *[manhole=unknown]
# *[manhole=yes]
# *[police=yes]
# *[traffic_calming=yes]
# *[access=restricted]
# *[barrier=yes]
# *[aerialway=yes][!public_transport]
# *[amenity=yes]
# *[leisure=yes]
# *[shop="*"]
# *[shop=yes][amenity!=fuel]
# *[craft=yes]
# *[service=yes]
# *[place=yes]
if ('access' in keys) or ('aerialway' in keys) or ('amenity' in keys) or ('barrier' in keys) or ('craft' in keys) or ('leisure' in keys) or ('manhole' in keys) or ('place' in keys) or ('playground' in keys) or ('police' in keys) or ('service' in keys) or ('shop' in keys) or ('traffic_calming' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'playground') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'plain'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'police') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'traffic_calming') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'restricted'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'aerialway') == mapcss._value_capture(capture_tags, 0, 'yes') and not mapcss._tag_capture(capture_tags, 1, tags, 'public_transport'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, '*'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'yes') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') != mapcss._value_const_capture(capture_tags, 1, 'fuel', 'fuel'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'craft') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'service') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0}={1} is unspecific. Please replace ''{1}'' by a specific value.","{0.key}","{0.value}")
err.append({'class': 9002007, 'subclass': 727505823, 'text': mapcss.tr('{0}={1} is unspecific. Please replace \'\'{1}\'\' by a specific value.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
# *[place_name][!name]
if ('place_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place_name') and not mapcss._tag_capture(capture_tags, 1, tags, 'name'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} should be replaced with {1}","{0.key}","{1.key}")
# fixChangeKey:"place_name => name"
err.append({'class': 9002008, 'subclass': 1089331760, 'text': mapcss.tr('{0} should be replaced with {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['name', mapcss.tag(tags, 'place_name')]]),
'-': ([
'place_name'])
}})
# *[place][place_name=*name]
if ('place' in keys and 'place_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place') and mapcss._tag_capture(capture_tags, 1, tags, 'place_name') == mapcss._value_capture(capture_tags, 1, mapcss.tag(tags, 'name')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} = {1}; remove {0}","{1.key}","{1.value}")
# fixRemove:"{1.key}"
err.append({'class': 9002009, 'subclass': 1116761280, 'text': mapcss.tr('{0} = {1}; remove {0}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{1.value}')), 'allow_fix_override': True, 'fix': {
'-': ([
mapcss._tag_uncapture(capture_tags, '{1.key}')])
}})
# *[waterway=water_point]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'water_point'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=water_point"
# fixChangeKey:"waterway => amenity"
err.append({'class': 9002001, 'subclass': 103347605, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'waterway')]]),
'-': ([
'waterway'])
}})
# *[waterway=waste_disposal]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'waste_disposal'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=waste_disposal"
# fixChangeKey:"waterway => amenity"
err.append({'class': 9002001, 'subclass': 1963461348, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'waterway')]]),
'-': ([
'waterway'])
}})
# *[waterway=mooring]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'mooring'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"mooring=yes"
# fixAdd:"mooring=yes"
# fixRemove:"waterway"
err.append({'class': 9002001, 'subclass': 81358738, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['mooring','yes']]),
'-': ([
'waterway'])
}})
# *[building][levels]
# *[building:part=yes][levels]
if ('building' in keys and 'levels' in keys) or ('building:part' in keys and 'levels' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') and mapcss._tag_capture(capture_tags, 1, tags, 'levels'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:part') == mapcss._value_capture(capture_tags, 0, 'yes') and mapcss._tag_capture(capture_tags, 1, tags, 'levels'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.key}")
# suggestAlternative:"building:levels"
# fixChangeKey:"levels => building:levels"
err.append({'class': 9002001, 'subclass': 293177436, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building:levels', mapcss.tag(tags, 'levels')]]),
'-': ([
'levels'])
}})
# *[protected_class]
if ('protected_class' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'protected_class'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"protect_class"
# fixChangeKey:"protected_class => protect_class"
err.append({'class': 9002001, 'subclass': 716999373, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['protect_class', mapcss.tag(tags, 'protected_class')]]),
'-': ([
'protected_class'])
}})
# *[kerb=unknown]
# *[lock=unknown]
# *[hide=unknown]
# *[shelter=unknown]
# *[access=unknown]
# *[capacity:parent=unknown]
# *[capacity:women=unknown]
# *[capacity:disabled=unknown]
# *[crossing=unknown]
# *[foot=unknown]
if ('access' in keys) or ('capacity:disabled' in keys) or ('capacity:parent' in keys) or ('capacity:women' in keys) or ('crossing' in keys) or ('foot' in keys) or ('hide' in keys) or ('kerb' in keys) or ('lock' in keys) or ('shelter' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kerb') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lock') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hide') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shelter') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:parent') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:women') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:disabled') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'crossing') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'foot') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("Unspecific tag {0}","{0.tag}")
err.append({'class': 9002010, 'subclass': 1052866123, 'text': mapcss.tr('Unspecific tag {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sport=skiing]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'skiing'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("Definition of {0} is unclear","{0.tag}")
# suggestAlternative:tr("{0} + {1} + {2}","piste:type=*","piste:difficulty=*","piste:grooming=*")
err.append({'class': 9002001, 'subclass': 1578959559, 'text': mapcss.tr('Definition of {0} is unclear', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[waterway=wadi]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'wadi'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=valley"
# suggestAlternative:"{0.key}=* + intermittent=yes"
err.append({'class': 9002001, 'subclass': 719234223, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[drinkable]
if ('drinkable' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'drinkable'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"drinking_water"
err.append({'class': 9002001, 'subclass': 1785584789, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[color][!colour]
if ('color' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'color') and not mapcss._tag_capture(capture_tags, 1, tags, 'colour'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"colour"
# fixChangeKey:"color => colour"
err.append({'class': 9002001, 'subclass': 1850270072, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['colour', mapcss.tag(tags, 'color')]]),
'-': ([
'color'])
}})
# *[color][colour][color=*colour]
if ('color' in keys and 'colour' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'color') and mapcss._tag_capture(capture_tags, 1, tags, 'colour') and mapcss._tag_capture(capture_tags, 2, tags, 'color') == mapcss._value_capture(capture_tags, 2, mapcss.tag(tags, 'colour')))
except mapcss.RuleAbort: pass
if match:
# setsamecolor
# group:tr("deprecated tagging")
# throwWarning:tr("{0} together with {1}","{0.key}","{1.key}")
# suggestAlternative:"colour"
# fixRemove:"color"
set_samecolor = True
err.append({'class': 9002001, 'subclass': 1825345743, 'text': mapcss.tr('{0} together with {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
'color'])
}})
# *[color][colour]!.samecolor
if ('color' in keys and 'colour' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_samecolor and mapcss._tag_capture(capture_tags, 0, tags, 'color') and mapcss._tag_capture(capture_tags, 1, tags, 'colour'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} together with {1} and conflicting values","{0.key}","{1.key}")
# suggestAlternative:"colour"
err.append({'class': 9002001, 'subclass': 1064658218, 'text': mapcss.tr('{0} together with {1} and conflicting values', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}'))})
# *[building:color][building:colour]!.samebuildingcolor
# Rule Blacklisted
# *[roof:color][roof:colour]!.sameroofcolor
# Rule Blacklisted
# *[/:color/][!building:color][!roof:color][!gpxd:color]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_554de4c7) and not mapcss._tag_capture(capture_tags, 1, tags, 'building:color') and not mapcss._tag_capture(capture_tags, 2, tags, 'roof:color') and not mapcss._tag_capture(capture_tags, 3, tags, 'gpxd:color'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:":colour"
err.append({'class': 9002001, 'subclass': 1632389707, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/color:/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_0c5b5730))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"colour:"
err.append({'class': 9002001, 'subclass': 1390370717, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/=|\+|\/|&|<|>|;|'|"|%|#|@|\\|,|\.|\{|\}|\?|\*|\^|\$/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_620f4d52))
except mapcss.RuleAbort: pass
if match:
# group:tr("key with uncommon character")
# throwWarning:tr("{0}","{0.key}")
err.append({'class': 9002011, 'subclass': 1752615188, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/^.$/]
# node[/^..$/][!kp][!pk]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_27210286))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_34c15d62) and not mapcss._tag_capture(capture_tags, 1, tags, 'kp') and not mapcss._tag_capture(capture_tags, 2, tags, 'pk'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("uncommon short key")
# assertMatch:"node f=b"
# assertMatch:"node fo=bar"
# assertNoMatch:"node kp=5"
# assertNoMatch:"node pk=7"
err.append({'class': 9002012, 'subclass': 79709106, 'text': mapcss.tr('uncommon short key')})
# *[sport=hockey]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'hockey'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=field_hockey"
# suggestAlternative:"sport=ice_hockey"
err.append({'class': 9002001, 'subclass': 651933474, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sport=billard]
# *[sport=billards]
# *[sport=billiard]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billard'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billards'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billiard'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=billiards"
# fixAdd:"sport=billiards"
err.append({'class': 9002001, 'subclass': 1522897824, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['sport','billiards']])
}})
# *[payment:credit_cards=yes]
if ('payment:credit_cards' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:credit_cards') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:mastercard=yes","payment:visa=yes")
err.append({'class': 9002013, 'subclass': 705181097, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:mastercard=yes', 'payment:visa=yes')})
# *[payment:debit_cards=yes]
if ('payment:debit_cards' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:debit_cards') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:maestro=yes","payment:girocard=yes")
err.append({'class': 9002013, 'subclass': 679215558, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:maestro=yes', 'payment:girocard=yes')})
# *[payment:electronic_purses=yes]
if ('payment:electronic_purses' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:electronic_purses') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:ep_geldkarte=yes","payment:ep_quick=yes")
err.append({'class': 9002013, 'subclass': 1440457244, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:ep_geldkarte=yes', 'payment:ep_quick=yes')})
# *[payment:cryptocurrencies=yes]
if ('payment:cryptocurrencies' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:cryptocurrencies') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:bitcoin=yes","payment:litecoin=yes")
err.append({'class': 9002013, 'subclass': 1325255949, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:bitcoin=yes', 'payment:litecoin=yes')})
# *[payment:ep_quick]
# *[payment:ep_cash]
# *[payment:ep_proton]
# *[payment:ep_chipknip]
if ('payment:ep_cash' in keys) or ('payment:ep_chipknip' in keys) or ('payment:ep_proton' in keys) or ('payment:ep_quick' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_quick'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_cash'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_proton'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_chipknip'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 332575437, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[kp][railway!=milestone]
if ('kp' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kp') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') != mapcss._value_const_capture(capture_tags, 1, 'milestone', 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"distance"
# fixChangeKey:"kp => distance"
err.append({'class': 9002001, 'subclass': 1256703107, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['distance', mapcss.tag(tags, 'kp')]]),
'-': ([
'kp'])
}})
# *[pk][railway!=milestone]
if ('pk' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pk') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') != mapcss._value_const_capture(capture_tags, 1, 'milestone', 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"distance"
# fixChangeKey:"pk => distance"
err.append({'class': 9002001, 'subclass': 1339969759, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['distance', mapcss.tag(tags, 'pk')]]),
'-': ([
'pk'])
}})
# *[kp][railway=milestone]
if ('kp' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kp') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"railway:position"
# fixChangeKey:"kp => railway:position"
err.append({'class': 9002001, 'subclass': 1667272140, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'kp')]]),
'-': ([
'kp'])
}})
# *[pk][railway=milestone]
if ('pk' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pk') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"railway:position"
# fixChangeKey:"pk => railway:position"
err.append({'class': 9002001, 'subclass': 691355164, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'pk')]]),
'-': ([
'pk'])
}})
# *[distance][railway=milestone]
if ('distance' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'distance') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"railway:position"
# fixChangeKey:"distance => railway:position"
err.append({'class': 9002001, 'subclass': 113691181, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'distance')]]),
'-': ([
'distance'])
}})
# *[postcode]
if ('postcode' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'postcode'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"addr:postcode"
# suggestAlternative:"postal_code"
err.append({'class': 9002001, 'subclass': 1942523538, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[water=intermittent]
if ('water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'intermittent'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"intermittent=yes"
# fixAdd:"intermittent=yes"
# fixRemove:"water"
err.append({'class': 9002001, 'subclass': 813530321, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['intermittent','yes']]),
'-': ([
'water'])
}})
# node[type][pipeline=marker]
if ('pipeline' in keys and 'type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') and mapcss._tag_capture(capture_tags, 1, tags, 'pipeline') == mapcss._value_capture(capture_tags, 1, 'marker'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"substance"
# fixChangeKey:"type => substance"
err.append({'class': 9002001, 'subclass': 1878458659, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['substance', mapcss.tag(tags, 'type')]]),
'-': ([
'type'])
}})
# *[landuse=farm]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'farm'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=farmland"
# suggestAlternative:"landuse=farmyard"
err.append({'class': 9002001, 'subclass': 1968473048, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[seamark=buoy]["seamark:type"=~/^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$/]
if ('seamark' in keys and 'seamark:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'seamark') == mapcss._value_capture(capture_tags, 0, 'buoy') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_61b0be1b), mapcss._tag_capture(capture_tags, 1, tags, 'seamark:type')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"{1.tag}"
# fixRemove:"seamark"
err.append({'class': 9002001, 'subclass': 1224401740, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'seamark'])
}})
# *[seamark=buoy]["seamark:type"!~/^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$/]
if ('seamark' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'seamark') == mapcss._value_capture(capture_tags, 0, 'buoy') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_61b0be1b, '^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$'), mapcss._tag_capture(capture_tags, 1, tags, 'seamark:type')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"{1.tag}"
err.append({'class': 9002001, 'subclass': 1481035998, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[landuse=conservation]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'conservation'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"boundary=protected_area"
# fixAdd:"boundary=protected_area"
# fixRemove:"landuse"
err.append({'class': 9002001, 'subclass': 824801072, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['boundary','protected_area']]),
'-': ([
'landuse'])
}})
# *[amenity=kiosk]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'kiosk'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=kiosk"
# fixChangeKey:"amenity => shop"
err.append({'class': 9002001, 'subclass': 1331930630, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=shop]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'shop'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=*"
err.append({'class': 9002001, 'subclass': 1562207150, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=fishmonger]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fishmonger'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=seafood"
# fixAdd:"shop=seafood"
err.append({'class': 9002001, 'subclass': 1376789416, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','seafood']])
}})
# *[shop=fish]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fish'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=fishing"
# suggestAlternative:"shop=pet"
# suggestAlternative:"shop=seafood"
err.append({'class': 9002001, 'subclass': 47191734, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=betting]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'betting'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=casino"
# suggestAlternative:"amenity=gambling"
# suggestAlternative:"leisure=adult_gaming_centre"
# suggestAlternative:"leisure=amusement_arcade"
# suggestAlternative:"shop=bookmaker"
# suggestAlternative:"shop=lottery"
err.append({'class': 9002001, 'subclass': 1035501389, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=perfume]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'perfume'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=perfumery"
# fixAdd:"shop=perfumery"
err.append({'class': 9002001, 'subclass': 2075099676, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','perfumery']])
}})
# *[amenity=exercise_point]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'exercise_point'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=fitness_station"
# fixRemove:"amenity"
# fixAdd:"leisure=fitness_station"
err.append({'class': 9002001, 'subclass': 1514920202, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure','fitness_station']]),
'-': ([
'amenity'])
}})
# *[shop=auto_parts]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'auto_parts'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=car_parts"
# fixAdd:"shop=car_parts"
err.append({'class': 9002001, 'subclass': 1675828779, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','car_parts']])
}})
# *[amenity=car_repair]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'car_repair'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=car_repair"
# fixChangeKey:"amenity => shop"
err.append({'class': 9002001, 'subclass': 1681273585, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=studio][type=audio]
# *[amenity=studio][type=radio]
# *[amenity=studio][type=television]
# *[amenity=studio][type=video]
if ('amenity' in keys and 'type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'audio'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'radio'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'television'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'video'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"studio"
# fixChangeKey:"type => studio"
err.append({'class': 9002001, 'subclass': 413401822, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['studio', mapcss.tag(tags, 'type')]]),
'-': ([
'type'])
}})
# *[power=cable_distribution_cabinet]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'cable_distribution_cabinet'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=street_cabinet + street_cabinet=*"
# fixAdd:"man_made=street_cabinet"
# fixRemove:"power"
err.append({'class': 9002001, 'subclass': 1007567078, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','street_cabinet']]),
'-': ([
'power'])
}})
# *[power][location=kiosk]
if ('location' in keys and 'power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') and mapcss._tag_capture(capture_tags, 1, tags, 'location') == mapcss._value_capture(capture_tags, 1, 'kiosk'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.tag}")
# fixRemove:"location"
# fixAdd:"man_made=street_cabinet"
# fixAdd:"street_cabinet=power"
err.append({'class': 9002001, 'subclass': 182905067, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','street_cabinet'],
['street_cabinet','power']]),
'-': ([
'location'])
}})
# *[man_made=well]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'well'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=petroleum_well"
# suggestAlternative:"man_made=water_well"
err.append({'class': 9002001, 'subclass': 1740864107, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=dog_bin]
# *[amenity=dog_waste_bin]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'dog_bin'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'dog_waste_bin'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=waste_basket + waste=dog_excrement + vending=excrement_bags"
# fixAdd:"amenity=waste_basket"
# fixAdd:"vending=excrement_bags"
# fixAdd:"waste=dog_excrement"
err.append({'class': 9002001, 'subclass': 2091877281, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','waste_basket'],
['vending','excrement_bags'],
['waste','dog_excrement']])
}})
# *[amenity=artwork]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'artwork'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"tourism=artwork"
# fixRemove:"amenity"
# fixAdd:"tourism=artwork"
err.append({'class': 9002001, 'subclass': 728429076, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tourism','artwork']]),
'-': ([
'amenity'])
}})
# *[amenity=community_center]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'community_center'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=community_centre"
# fixAdd:"amenity=community_centre"
err.append({'class': 9002001, 'subclass': 690512681, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','community_centre']])
}})
# *[man_made=cut_line]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'cut_line'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=cutline"
# fixAdd:"man_made=cutline"
err.append({'class': 9002001, 'subclass': 1008752382, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','cutline']])
}})
# *[amenity=park]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'park'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=park"
# fixRemove:"amenity"
# fixAdd:"leisure=park"
err.append({'class': 9002001, 'subclass': 2085280194, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure','park']]),
'-': ([
'amenity'])
}})
# *[amenity=hotel]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hotel'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"tourism=hotel"
# fixRemove:"amenity"
# fixAdd:"tourism=hotel"
err.append({'class': 9002001, 'subclass': 1341786818, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tourism','hotel']]),
'-': ([
'amenity'])
}})
# *[shop=window]
# *[shop=windows]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'window'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'windows'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=window_construction"
# fixAdd:"craft=window_construction"
# fixRemove:"shop"
err.append({'class': 9002001, 'subclass': 532391183, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['craft','window_construction']]),
'-': ([
'shop'])
}})
# *[amenity=education]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'education'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=college"
# suggestAlternative:"amenity=school"
# suggestAlternative:"amenity=university"
err.append({'class': 9002001, 'subclass': 796960259, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=gallery]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'gallery'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=art"
# fixAdd:"shop=art"
err.append({'class': 9002001, 'subclass': 1319611546, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','art']])
}})
# *[shop=gambling]
# *[leisure=gambling]
if ('leisure' in keys) or ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'gambling'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'gambling'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=casino"
# suggestAlternative:"amenity=gambling"
# suggestAlternative:"leisure=amusement_arcade"
# suggestAlternative:"shop=bookmaker"
# suggestAlternative:"shop=lottery"
err.append({'class': 9002001, 'subclass': 1955724853, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[office=real_estate]
# *[office=real_estate_agent]
if ('office' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'real_estate'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'real_estate_agent'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=estate_agent"
# fixAdd:"office=estate_agent"
err.append({'class': 9002001, 'subclass': 2027311706, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['office','estate_agent']])
}})
# *[shop=glass]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'glass'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=glaziery"
# suggestAlternative:"shop=glaziery"
err.append({'class': 9002001, 'subclass': 712020531, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=proposed]
# *[amenity=disused]
# *[shop=disused]
# *[highway=abandoned]
# *[historic=abandoned]
if ('amenity' in keys) or ('highway' in keys) or ('historic' in keys) or ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'proposed'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'disused'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'disused'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Use the {1}: key prefix instead.","{0.tag}","{0.value}")
err.append({'class': 9002001, 'subclass': 847809313, 'text': mapcss.tr('{0} is deprecated. Use the {1}: key prefix instead.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
# *[amenity=swimming_pool]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'swimming_pool'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=swimming_pool"
# fixChangeKey:"amenity => leisure"
err.append({'class': 9002001, 'subclass': 2012807801, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=sauna]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'sauna'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=sauna"
# fixChangeKey:"amenity => leisure"
err.append({'class': 9002001, 'subclass': 1450116742, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[/^[^t][^i][^g].+_[0-9]$/][!/^note_[0-9]$/][!/^description_[0-9]$/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_300dfa36) and not mapcss._tag_capture(capture_tags, 1, tags, self.re_3185ac6d) and not mapcss._tag_capture(capture_tags, 2, tags, self.re_6d27b157))
except mapcss.RuleAbort: pass
if match:
# group:tr("questionable key (ending with a number)")
# throwWarning:tr("{0}","{0.key}")
err.append({'class': 9002014, 'subclass': 2081989305, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[sport=skating]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'skating'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=ice_skating"
# suggestAlternative:"sport=roller_skating"
err.append({'class': 9002001, 'subclass': 170699177, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=public_building]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'public_building'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"..."
# suggestAlternative:"amenity=community_centre"
# suggestAlternative:"amenity=hospital"
# suggestAlternative:"amenity=townhall"
# suggestAlternative:"building=hospital"
# suggestAlternative:"building=public"
# suggestAlternative:"leisure=sports_centre"
# suggestAlternative:"office=government"
err.append({'class': 9002001, 'subclass': 1295642010, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[office=administrative]
if ('office' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'administrative'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=government"
# fixAdd:"office=government"
err.append({'class': 9002001, 'subclass': 213844674, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['office','government']])
}})
# *[vending=news_papers]
if ('vending' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'news_papers'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"vending=newspapers"
# fixAdd:"vending=newspapers"
err.append({'class': 9002001, 'subclass': 1133820292, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['vending','newspapers']])
}})
# *[service=drive_through]
if ('service' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'service') == mapcss._value_capture(capture_tags, 0, 'drive_through'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"service=drive-through"
# fixAdd:"service=drive-through"
err.append({'class': 9002001, 'subclass': 283545650, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['service','drive-through']])
}})
# *[noexit][noexit!=yes][noexit!=no]
if ('noexit' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') and mapcss._tag_capture(capture_tags, 1, tags, 'noexit') != mapcss._value_const_capture(capture_tags, 1, 'yes', 'yes') and mapcss._tag_capture(capture_tags, 2, tags, 'noexit') != mapcss._value_const_capture(capture_tags, 2, 'no', 'no'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("The key {0} has an uncommon value.","{1.key}")
err.append({'class': 9002017, 'subclass': 1357403556, 'text': mapcss.tr('The key {0} has an uncommon value.', mapcss._tag_uncapture(capture_tags, '{1.key}'))})
# *[name:botanical]
if ('name:botanical' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'name:botanical'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"species"
err.append({'class': 9002001, 'subclass': 1061429000, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# node[pole=air_to_ground]
# node[pole=transition]
if ('pole' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole') == mapcss._value_capture(capture_tags, 0, 'air_to_ground'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole') == mapcss._value_capture(capture_tags, 0, 'transition'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location:transition=yes"
# fixAdd:"location:transition=yes"
# fixRemove:"pole"
err.append({'class': 9002001, 'subclass': 647400518, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location:transition','yes']]),
'-': ([
'pole'])
}})
# node[tower=air_to_ground]
# node[tower=transition]
if ('tower' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower') == mapcss._value_capture(capture_tags, 0, 'air_to_ground'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower') == mapcss._value_capture(capture_tags, 0, 'transition'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location:transition=yes"
# fixAdd:"location:transition=yes"
# fixRemove:"tower"
err.append({'class': 9002001, 'subclass': 1616290060, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location:transition','yes']]),
'-': ([
'tower'])
}})
# *[shop=souvenir]
# *[shop=souvenirs]
# *[shop=souveniers]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souvenir'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souvenirs'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souveniers'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=gift"
# fixAdd:"shop=gift"
err.append({'class': 9002001, 'subclass': 1794702946, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','gift']])
}})
# *[vending=animal_food]
if ('vending' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'animal_food'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"vending=animal_feed"
# fixAdd:"vending=animal_feed"
err.append({'class': 9002001, 'subclass': 1077411296, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['vending','animal_feed']])
}})
# node[vending=photos][amenity=vending_machine]
# node[vending=photo][amenity=vending_machine]
if ('amenity' in keys and 'vending' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'photos') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') == mapcss._value_capture(capture_tags, 1, 'vending_machine'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'photo') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') == mapcss._value_capture(capture_tags, 1, 'vending_machine'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=photo_booth"
# fixAdd:"amenity=photo_booth"
# fixRemove:"vending"
err.append({'class': 9002001, 'subclass': 1387510120, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','photo_booth']]),
'-': ([
'vending'])
}})
# node[vending=photos][amenity!=vending_machine]
if ('vending' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'photos') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') != mapcss._value_const_capture(capture_tags, 1, 'vending_machine', 'vending_machine'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=photo_booth"
err.append({'class': 9002001, 'subclass': 1506790891, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# node[highway=emergency_access_point][phone][!emergency_telephone_code]
if ('highway' in keys and 'phone' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'emergency_access_point') and mapcss._tag_capture(capture_tags, 1, tags, 'phone') and not mapcss._tag_capture(capture_tags, 2, tags, 'emergency_telephone_code'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"emergency_telephone_code"
# fixChangeKey:"phone => emergency_telephone_code"
# assertNoMatch:"node highway=emergency_access_point emergency_telephone_code=456"
# assertNoMatch:"node highway=emergency_access_point phone=123 emergency_telephone_code=456"
# assertMatch:"node highway=emergency_access_point phone=123"
# assertNoMatch:"node phone=123"
err.append({'class': 9002001, 'subclass': 1339208019, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency_telephone_code', mapcss.tag(tags, 'phone')]]),
'-': ([
'phone'])
}})
# node[highway=emergency_access_point][phone=*emergency_telephone_code]
if ('highway' in keys and 'phone' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'emergency_access_point') and mapcss._tag_capture(capture_tags, 1, tags, 'phone') == mapcss._value_capture(capture_tags, 1, mapcss.tag(tags, 'emergency_telephone_code')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"emergency_telephone_code"
# fixRemove:"phone"
# assertNoMatch:"node highway=emergency_access_point emergency_telephone_code=123"
# assertMatch:"node highway=emergency_access_point phone=123 emergency_telephone_code=123"
# assertNoMatch:"node highway=emergency_access_point phone=123"
err.append({'class': 9002001, 'subclass': 342466099, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'phone'])
}})
# node[highway=emergency_access_point][phone][emergency_telephone_code][phone!=*emergency_telephone_code]
if ('emergency_telephone_code' in keys and 'highway' in keys and 'phone' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'emergency_access_point') and mapcss._tag_capture(capture_tags, 1, tags, 'phone') and mapcss._tag_capture(capture_tags, 2, tags, 'emergency_telephone_code') and mapcss._tag_capture(capture_tags, 3, tags, 'phone') != mapcss._value_capture(capture_tags, 3, mapcss.tag(tags, 'emergency_telephone_code')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"emergency_telephone_code"
# assertNoMatch:"node highway=emergency_access_point emergency_telephone_code=123"
# assertNoMatch:"node highway=emergency_access_point phone=123 emergency_telephone_code=123"
# assertNoMatch:"node highway=emergency_access_point phone=123"
err.append({'class': 9002001, 'subclass': 663070970, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=hunting_stand][lock=yes]
# *[amenity=hunting_stand][lock=no]
if ('amenity' in keys and 'lock' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hunting_stand') and mapcss._tag_capture(capture_tags, 1, tags, 'lock') == mapcss._value_capture(capture_tags, 1, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hunting_stand') and mapcss._tag_capture(capture_tags, 1, tags, 'lock') == mapcss._value_capture(capture_tags, 1, 'no'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"lockable"
# fixChangeKey:"lock => lockable"
err.append({'class': 9002001, 'subclass': 1939599742, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['lockable', mapcss.tag(tags, 'lock')]]),
'-': ([
'lock'])
}})
# *[amenity=advertising][!advertising]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'advertising') and not mapcss._tag_capture(capture_tags, 1, tags, 'advertising'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"advertising=*"
err.append({'class': 9002001, 'subclass': 1696784412, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=advertising][advertising]
if ('advertising' in keys and 'amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'advertising') and mapcss._tag_capture(capture_tags, 1, tags, 'advertising'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"advertising=*"
# fixRemove:"amenity"
err.append({'class': 9002001, 'subclass': 1538706366, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'amenity'])
}})
# *[building=true]
# *[building="*"]
# *[building=Y]
# *[building=y]
# *[building=1]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'true'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, '*'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'Y'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'y'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 1))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"building=yes"
# fixAdd:"building=yes"
err.append({'class': 9002018, 'subclass': 596818855, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','yes']])
}})
# *[building=abandoned]
# *[building=address]
# *[building=bing]
# *[building=collapsed]
# *[building=damaged]
# *[building=demolished]
# *[building=disused]
# *[building=fixme]
# *[building=occupied]
# *[building=razed]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'address'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'bing'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'collapsed'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'damaged'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'demolished'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'disused'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'fixme'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'occupied'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'razed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is not a building type.","{0.tag}")
err.append({'class': 9002001, 'subclass': 938825828, 'text': mapcss.tr('{0} is not a building type.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[building=other]
# *[building=unclassified]
# *[building=undefined]
# *[building=unknown]
# *[building=unidentified]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'other'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unclassified'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'undefined'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unidentified'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is not a building type.","{0.tag}")
# fixAdd:"building=yes"
err.append({'class': 9002001, 'subclass': 48721080, 'text': mapcss.tr('{0} is not a building type.', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','yes']])
}})
# node[power=transformer][location=pole][transformer]
if ('location' in keys and 'power' in keys and 'transformer' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'transformer') and mapcss._tag_capture(capture_tags, 1, tags, 'location') == mapcss._value_capture(capture_tags, 1, 'pole') and mapcss._tag_capture(capture_tags, 2, tags, 'transformer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} together with {1}","{0.tag}","{1.tag}")
# fixChangeKey:"location => power"
err.append({'class': 9002001, 'subclass': 161456790, 'text': mapcss.tr('{0} together with {1}', mapcss._tag_uncapture(capture_tags, '{0.tag}'), mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['power', mapcss.tag(tags, 'location')]]),
'-': ([
'location'])
}})
# node[power=transformer][location=pole][!transformer]
if ('location' in keys and 'power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'transformer') and mapcss._tag_capture(capture_tags, 1, tags, 'location') == mapcss._value_capture(capture_tags, 1, 'pole') and not mapcss._tag_capture(capture_tags, 2, tags, 'transformer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} together with {1}","{0.tag}","{1.tag}")
# fixChangeKey:"location => power"
# fixAdd:"transformer=yes"
err.append({'class': 9002001, 'subclass': 1830605870, 'text': mapcss.tr('{0} together with {1}', mapcss._tag_uncapture(capture_tags, '{0.tag}'), mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['power', mapcss.tag(tags, 'location')],
['transformer','yes']]),
'-': ([
'location'])
}})
# node[tourism=picnic_table]
# node[amenity=picnic_table]
# node[leisure=picnic]
# node[leisure=picnic_site]
if ('amenity' in keys) or ('leisure' in keys) or ('tourism' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tourism') == mapcss._value_capture(capture_tags, 0, 'picnic_table'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'picnic_table'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'picnic'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'picnic_site'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=picnic_table"
# suggestAlternative:"tourism=picnic_site"
err.append({'class': 9002001, 'subclass': 480506019, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=toilet]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'toilet'))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"amenity=toilets"
# fixAdd:"amenity=toilets"
err.append({'class': 9002018, 'subclass': 440018606, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','toilets']])
}})
# *[man_made=MDF]
# *[man_made=telephone_exchange]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'MDF'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'telephone_exchange'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"telecom=exchange"
# fixRemove:"man_made"
# fixAdd:"telecom=exchange"
err.append({'class': 9002001, 'subclass': 634698090, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['telecom','exchange']]),
'-': ([
'man_made'])
}})
# *[building=central_office]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'central_office'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"telecom=exchange"
# fixAdd:"building=yes"
# fixAdd:"telecom=exchange"
err.append({'class': 9002001, 'subclass': 1091970270, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','yes'],
['telecom','exchange']])
}})
# *[telecom=central_office]
if ('telecom' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'telecom') == mapcss._value_capture(capture_tags, 0, 'central_office'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"telecom=exchange"
# fixAdd:"telecom=exchange"
err.append({'class': 9002001, 'subclass': 1503278830, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['telecom','exchange']])
}})
# node[communication=outdoor_dslam]
# node[man_made=outdoor_dslam]
# node[street_cabinet=outdoor_dslam]
if ('communication' in keys) or ('man_made' in keys) or ('street_cabinet' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'communication') == mapcss._value_capture(capture_tags, 0, 'outdoor_dslam'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'outdoor_dslam'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'street_cabinet') == mapcss._value_capture(capture_tags, 0, 'outdoor_dslam'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"telecom=service_device"
# fixAdd:"telecom=service_device"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 1243371306, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['telecom','service_device']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# node[telecom=dslam]
# node[telecom=outdoor_dslam]
if ('telecom' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'telecom') == mapcss._value_capture(capture_tags, 0, 'dslam'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'telecom') == mapcss._value_capture(capture_tags, 0, 'outdoor_dslam'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"telecom=service_device"
# fixAdd:"telecom=service_device"
err.append({'class': 9002001, 'subclass': 781930166, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['telecom','service_device']])
}})
# node[amenity=fire_hydrant]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'fire_hydrant'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"emergency=fire_hydrant"
# fixChangeKey:"amenity => emergency"
err.append({'class': 9002001, 'subclass': 967497433, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# node[fire_hydrant:type=pond]
if ('fire_hydrant:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fire_hydrant:type') == mapcss._value_capture(capture_tags, 0, 'pond'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"water_source=pond"
# fixAdd:"water_source=pond"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 1583105855, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['water_source','pond']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# node[fire_hydrant:flow_capacity]
if ('fire_hydrant:flow_capacity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fire_hydrant:flow_capacity'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"flow_rate"
err.append({'class': 9002001, 'subclass': 1864683984, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# node[emergency=fire_hydrant][in_service=no]
if ('emergency' in keys and 'in_service' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'emergency') == mapcss._value_capture(capture_tags, 0, 'fire_hydrant') and mapcss._tag_capture(capture_tags, 1, tags, 'in_service') == mapcss._value_capture(capture_tags, 1, 'no'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.tag}")
# suggestAlternative:"disused:emergency=fire_hydrant"
# fixAdd:"disused:emergency=fire_hydrant"
# fixRemove:"{0.key}"
# fixRemove:"{1.key}"
err.append({'class': 9002001, 'subclass': 552149777, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['disused:emergency','fire_hydrant']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}'),
mapcss._tag_uncapture(capture_tags, '{1.key}')])
}})
# node[fire_hydrant:water_source]
if ('fire_hydrant:water_source' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fire_hydrant:water_source'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"water_source"
# fixChangeKey:"fire_hydrant:water_source => water_source"
err.append({'class': 9002001, 'subclass': 1207497718, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['water_source', mapcss.tag(tags, 'fire_hydrant:water_source')]]),
'-': ([
'fire_hydrant:water_source'])
}})
# *[natural=waterfall]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'waterfall'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"waterway=waterfall"
# fixChangeKey:"natural => waterway"
err.append({'class': 9002001, 'subclass': 764711734, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['waterway', mapcss.tag(tags, 'natural')]]),
'-': ([
'natural'])
}})
# *[religion=unitarian]
if ('religion' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'religion') == mapcss._value_capture(capture_tags, 0, 'unitarian'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"religion=unitarian_universalist"
# fixAdd:"religion=unitarian_universalist"
err.append({'class': 9002001, 'subclass': 9227331, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['religion','unitarian_universalist']])
}})
# *[shop=shopping_centre]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'shopping_centre'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=mall"
# fixAdd:"shop=mall"
err.append({'class': 9002001, 'subclass': 1448390566, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','mall']])
}})
# *[is_in]
# node[/^is_in:.*$/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'is_in'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_493fd1a6))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 355584917, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[sport=football]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'football'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=american_football"
# suggestAlternative:"sport=australian_football"
# suggestAlternative:"sport=canadian_football"
# suggestAlternative:"sport=gaelic_games"
# suggestAlternative:"sport=rugby_league"
# suggestAlternative:"sport=rugby_union"
# suggestAlternative:"sport=soccer"
err.append({'class': 9002001, 'subclass': 73038577, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[leisure=common]
if ('leisure' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'common'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"designation=common"
# suggestAlternative:"landuse=*"
# suggestAlternative:"leisure=*"
err.append({'class': 9002001, 'subclass': 157636301, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[cuisine=vegan]
# *[cuisine=vegetarian]
if ('cuisine' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') == mapcss._value_capture(capture_tags, 0, 'vegan'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') == mapcss._value_capture(capture_tags, 0, 'vegetarian'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# suggestAlternative:concat("diet:","{0.value}","=only")
# suggestAlternative:concat("diet:","{0.value}","=yes")
# throwWarning:tr("{0} is deprecated","{0.tag}")
err.append({'class': 9002001, 'subclass': 43604574, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[kitchen_hours]
if ('kitchen_hours' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kitchen_hours'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"opening_hours:kitchen"
# fixChangeKey:"kitchen_hours => opening_hours:kitchen"
err.append({'class': 9002001, 'subclass': 1088306802, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['opening_hours:kitchen', mapcss.tag(tags, 'kitchen_hours')]]),
'-': ([
'kitchen_hours'])
}})
# *[shop=money_transfer]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'money_transfer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=money_transfer"
# fixChangeKey:"shop => amenity"
err.append({'class': 9002001, 'subclass': 1664997936, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'shop')]]),
'-': ([
'shop'])
}})
# *[contact:google_plus]
if ('contact:google_plus' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'contact:google_plus'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# fixRemove:"contact:google_plus"
err.append({'class': 9002001, 'subclass': 1869461154, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
'contact:google_plus'])
}})
# *[amenity=garages]
# *[amenity=garage]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'garages'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'garage'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# suggestAlternative:concat("building=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=parking + parking=garage_boxes"
# suggestAlternative:"landuse=garages"
err.append({'class': 9002001, 'subclass': 863228118, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=winery]
# *[amenity=winery]
if ('amenity' in keys) or ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'winery'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'winery'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=winery"
# suggestAlternative:"shop=wine"
err.append({'class': 9002001, 'subclass': 1773574987, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=youth_centre]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'youth_centre'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=community_centre + community_centre=youth_centre"
# fixAdd:"amenity=community_centre"
# fixAdd:"community_centre=youth_centre"
err.append({'class': 9002001, 'subclass': 1284929085, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','community_centre'],
['community_centre','youth_centre']])
}})
# *[building:type][building=yes]
# *[building:type][!building]
if ('building' in keys and 'building:type' in keys) or ('building:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and mapcss._tag_capture(capture_tags, 1, tags, 'building') == mapcss._value_capture(capture_tags, 1, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and not mapcss._tag_capture(capture_tags, 1, tags, 'building'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"building"
# fixChangeKey:"building:type => building"
err.append({'class': 9002001, 'subclass': 1927794430, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building', mapcss.tag(tags, 'building:type')]]),
'-': ([
'building:type'])
}})
# *[building:type][building][building!=yes]
if ('building' in keys and 'building:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and mapcss._tag_capture(capture_tags, 1, tags, 'building') and mapcss._tag_capture(capture_tags, 2, tags, 'building') != mapcss._value_const_capture(capture_tags, 2, 'yes', 'yes'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"building"
err.append({'class': 9002001, 'subclass': 1133239698, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[escalator]
if ('escalator' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'escalator'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"highway=steps + conveying=*"
err.append({'class': 9002001, 'subclass': 967271828, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[fenced]
if ('fenced' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fenced'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"barrier=fence"
err.append({'class': 9002001, 'subclass': 1141285220, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[historic_name][!old_name]
if ('historic_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic_name') and not mapcss._tag_capture(capture_tags, 1, tags, 'old_name'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"old_name"
# fixChangeKey:"historic_name => old_name"
err.append({'class': 9002001, 'subclass': 1034538127, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['old_name', mapcss.tag(tags, 'historic_name')]]),
'-': ([
'historic_name'])
}})
# *[historic_name][old_name]
if ('historic_name' in keys and 'old_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic_name') and mapcss._tag_capture(capture_tags, 1, tags, 'old_name'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"old_name"
err.append({'class': 9002001, 'subclass': 30762614, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[landuse=field]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'field'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=farmland"
# fixAdd:"landuse=farmland"
err.append({'class': 9002001, 'subclass': 426261497, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['landuse','farmland']])
}})
# *[leisure=beach]
if ('leisure' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'beach'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=beach_resort"
# suggestAlternative:"natural=beach"
err.append({'class': 9002001, 'subclass': 1767286055, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[leisure=club]
if ('leisure' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'club'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"club=*"
err.append({'class': 9002001, 'subclass': 1282397509, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[leisure=video_arcade]
if ('leisure' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'video_arcade'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=adult_gaming_centre"
# suggestAlternative:"leisure=amusement_arcade"
err.append({'class': 9002001, 'subclass': 1463909830, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[man_made=jetty]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'jetty'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=pier"
# fixAdd:"man_made=pier"
err.append({'class': 9002001, 'subclass': 192707176, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','pier']])
}})
# *[man_made=village_pump]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'village_pump'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=water_well"
# fixAdd:"man_made=water_well"
err.append({'class': 9002001, 'subclass': 423232686, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','water_well']])
}})
# *[man_made=water_tank]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'water_tank'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=storage_tank + content=water"
# fixAdd:"content=water"
# fixAdd:"man_made=storage_tank"
err.append({'class': 9002001, 'subclass': 563629665, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['content','water'],
['man_made','storage_tank']])
}})
# *[natural=moor]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'moor'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=meadow + meadow=agricultural"
# suggestAlternative:"natural=fell"
# suggestAlternative:"natural=grassland"
# suggestAlternative:"natural=heath"
# suggestAlternative:"natural=scrub"
# suggestAlternative:"natural=tundra"
# suggestAlternative:"natural=wetland"
err.append({'class': 9002001, 'subclass': 374637717, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[noexit=no][!fixme]
if ('noexit' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') == mapcss._value_capture(capture_tags, 0, 'no') and not mapcss._tag_capture(capture_tags, 1, tags, 'fixme'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"fixme=continue"
# fixAdd:"fixme=continue"
# fixRemove:"noexit"
err.append({'class': 9002001, 'subclass': 647435126, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['fixme','continue']]),
'-': ([
'noexit'])
}})
# *[noexit=no][fixme]
if ('fixme' in keys and 'noexit' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') == mapcss._value_capture(capture_tags, 0, 'no') and mapcss._tag_capture(capture_tags, 1, tags, 'fixme'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"fixme=continue"
err.append({'class': 9002001, 'subclass': 881828009, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=dive]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'dive'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=scuba_diving"
# fixAdd:"shop=scuba_diving"
err.append({'class': 9002001, 'subclass': 1582968978, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','scuba_diving']])
}})
# *[shop=furnace]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'furnace'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=plumber"
# suggestAlternative:"shop=fireplace"
err.append({'class': 9002001, 'subclass': 1155821104, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sport=paragliding]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'paragliding'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=free_flying"
# fixAdd:"sport=free_flying"
err.append({'class': 9002001, 'subclass': 1531788430, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['sport','free_flying']])
}})
# *[tourism=bed_and_breakfast]
if ('tourism' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tourism') == mapcss._value_capture(capture_tags, 0, 'bed_and_breakfast'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"tourism=guest_house + guest_house=bed_and_breakfast"
# fixAdd:"guest_house=bed_and_breakfast"
# fixAdd:"tourism=guest_house"
err.append({'class': 9002001, 'subclass': 954237438, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['guest_house','bed_and_breakfast'],
['tourism','guest_house']])
}})
# *[diaper=yes]
# *[diaper=no]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper => changing_table"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 1957125311, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table', mapcss.tag(tags, 'diaper')]]),
'-': ([
'diaper'])
}})
# *[diaper][diaper=~/^[1-9][0-9]*$/]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_0f294fdf), mapcss._tag_capture(capture_tags, 1, tags, 'diaper')))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table=yes + changing_table:count=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixAdd:"changing_table=yes"
# fixChangeKey:"diaper => changing_table:count"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 2105051472, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table','yes'],
['changing_table:count', mapcss.tag(tags, 'diaper')]]),
'-': ([
'diaper'])
}})
# *[diaper=room]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'room'))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table=dedicated_room"
# suggestAlternative:"changing_table=room"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 883202329, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[diaper]!.diaper_checked
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_diaper_checked and mapcss._tag_capture(capture_tags, 0, tags, 'diaper'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"changing_table"
err.append({'class': 9002001, 'subclass': 693675339, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[diaper:male=yes]
if ('diaper:male' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:male') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=male_toilet"
# fixAdd:"changing_table:location=male_toilet"
# fixRemove:"diaper:male"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 799035479, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','male_toilet']]),
'-': ([
'diaper:male'])
}})
# *[diaper:female=yes]
if ('diaper:female' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:female') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=female_toilet"
# fixAdd:"changing_table:location=female_toilet"
# fixRemove:"diaper:female"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1450901137, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','female_toilet']]),
'-': ([
'diaper:female'])
}})
# *[diaper:unisex=yes]
if ('diaper:unisex' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:unisex') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=unisex_toilet"
# fixAdd:"changing_table:location=unisex_toilet"
# fixRemove:"diaper:unisex"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1460378712, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','unisex_toilet']]),
'-': ([
'diaper:unisex'])
}})
# *[diaper:wheelchair=yes]
# *[diaper:wheelchair=no]
if ('diaper:wheelchair' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:wheelchair') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:wheelchair') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table:wheelchair=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper:wheelchair => changing_table:wheelchair"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1951967281, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:wheelchair', mapcss.tag(tags, 'diaper:wheelchair')]]),
'-': ([
'diaper:wheelchair'])
}})
# *[diaper:fee=yes]
# *[diaper:fee=no]
if ('diaper:fee' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:fee') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:fee') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table:fee=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper:fee => changing_table:fee"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 2008573526, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:fee', mapcss.tag(tags, 'diaper:fee')]]),
'-': ([
'diaper:fee'])
}})
# *[/^diaper:/]!.diaper___checked
if True:
match = False
if not match:
capture_tags = {}
try: match = (not set_diaper___checked and mapcss._tag_capture(capture_tags, 0, tags, self.re_6029fe03))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","diaper:*")
# suggestAlternative:"changing_table:*"
err.append({'class': 9002001, 'subclass': 26578864, 'text': mapcss.tr('{0} is deprecated', 'diaper:*')})
# *[changing_table][changing_table!~/^(yes|no|limited)$/]
if ('changing_table' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'changing_table') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_787405b1, '^(yes|no|limited)$'), mapcss._tag_capture(capture_tags, 1, tags, 'changing_table')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("wrong value: {0}","{0.tag}")
# suggestAlternative:"changing_table=limited"
# suggestAlternative:"changing_table=no"
# suggestAlternative:"changing_table=yes"
err.append({'class': 9002019, 'subclass': 1965225408, 'text': mapcss.tr('wrong value: {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[roof:shape=half_hipped]
if ('roof:shape' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'roof:shape') == mapcss._value_capture(capture_tags, 0, 'half_hipped'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"roof:shape=half-hipped"
# fixAdd:"roof:shape=half-hipped"
err.append({'class': 9002001, 'subclass': 1548347123, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['roof:shape','half-hipped']])
}})
# *[bridge_name]
if ('bridge_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge_name'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"bridge:name"
# fixChangeKey:"bridge_name => bridge:name"
err.append({'class': 9002001, 'subclass': 80069399, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:name', mapcss.tag(tags, 'bridge_name')]]),
'-': ([
'bridge_name'])
}})
# *[access=public]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'public'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"access=yes"
# fixAdd:"access=yes"
err.append({'class': 9002001, 'subclass': 1115157097, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['access','yes']])
}})
# *[crossing=island]
if ('crossing' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'crossing') == mapcss._value_capture(capture_tags, 0, 'island'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"crossing:island=yes"
# fixRemove:"crossing"
# fixAdd:"crossing:island=yes"
err.append({'class': 9002001, 'subclass': 1512561318, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['crossing:island','yes']]),
'-': ([
'crossing'])
}})
# *[recycling:metal]
if ('recycling:metal' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'recycling:metal'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"recycling:scrap_metal"
# fixChangeKey:"recycling:metal => recycling:scrap_metal"
err.append({'class': 9002001, 'subclass': 474491272, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['recycling:scrap_metal', mapcss.tag(tags, 'recycling:metal')]]),
'-': ([
'recycling:metal'])
}})
# *[shop=dog_grooming]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'dog_grooming'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=pet_grooming"
# fixAdd:"shop=pet_grooming"
err.append({'class': 9002001, 'subclass': 1073412885, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','pet_grooming']])
}})
# *[tower:type=anchor]
# *[tower:type=suspension]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'anchor'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'suspension'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# suggestAlternative:concat("line_attachment=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"tower:type => line_attachment"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 180380605, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_attachment', mapcss.tag(tags, 'tower:type')]]),
'-': ([
'tower:type'])
}})
# *[tower:type=branch][branch:type=split]
# *[tower:type=branch][branch:type=loop]
if ('branch:type' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'split'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'loop'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=split"
# fixRemove:"branch:type"
# fixAdd:"line_management=split"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 362350862, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','split']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=branch][!branch:type]
# *[tower:type=branch][branch:type=tap]
if ('branch:type' in keys and 'tower:type' in keys) or ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and not mapcss._tag_capture(capture_tags, 1, tags, 'branch:type'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'tap'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=branch"
# fixRemove:"branch:type"
# fixAdd:"line_management=branch"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 476423517, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','branch']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=branch][branch:type=cross]
if ('branch:type' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'cross'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=cross"
# fixRemove:"branch:type"
# fixAdd:"line_management=cross"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 2103059531, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','cross']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=termination]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'termination'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=termination"
# fixAdd:"line_management=termination"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 232235847, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','termination']]),
'-': ([
'tower:type'])
}})
# *[tower:type=transition]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'transition'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location:transition=yes"
# fixAdd:"location:transition=yes"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1124904944, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location:transition','yes']]),
'-': ([
'tower:type'])
}})
# *[tower:type=transposing]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'transposing'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=transpose"
# fixAdd:"line_management=transpose"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1795169098, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','transpose']]),
'-': ([
'tower:type'])
}})
# *[tower:type=crossing]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'crossing'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"height=* + design=*"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1301565974, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[tower:type][power][power=~/^(tower|pole|insulator|portal|terminal)$/]!.power_tower_type_warning
if ('power' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_power_tower_type_warning and mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') and mapcss._tag_capture(capture_tags, 1, tags, 'power') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 2, self.re_24dfeb95), mapcss._tag_capture(capture_tags, 2, tags, 'power')))
except mapcss.RuleAbort: pass
if match:
# setgeneric_power_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"design"
# suggestAlternative:"line_attachment"
# suggestAlternative:"line_management"
# suggestAlternative:"structure"
set_generic_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 2020421267, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}'))})
# node[pole:type=anchor]
# node[pole:type=suspension]
if ('pole:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'anchor'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'suspension'))
except mapcss.RuleAbort: pass
if match:
# setpower_pole_type_warning
# group:tr("deprecated tagging")
# suggestAlternative:concat("line_attachment=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"pole:type => line_attachment"
set_power_pole_type_warning = True
err.append({'class': 9002001, 'subclass': 1925507031, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_attachment', mapcss.tag(tags, 'pole:type')]]),
'-': ([
'pole:type'])
}})
# node[pole:type=branch][branch:type=split]
# node[pole:type=branch][branch:type=loop]
if ('branch:type' in keys and 'pole:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'split'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'loop'))
except mapcss.RuleAbort: pass
if match:
# setpower_pole_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=split"
# fixRemove:"branch:type"
# fixAdd:"line_management=split"
# fixRemove:"pole:type"
set_power_pole_type_warning = True
err.append({'class': 9002001, 'subclass': 1645001021, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','split']]),
'-': ([
'branch:type',
'pole:type'])
}})
# node[pole:type=branch][!branch:type]
# node[pole:type=branch][branch:type=tap]
if ('branch:type' in keys and 'pole:type' in keys) or ('pole:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'branch') and not mapcss._tag_capture(capture_tags, 1, tags, 'branch:type'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'tap'))
except mapcss.RuleAbort: pass
if match:
# setpower_pole_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=branch"
# fixRemove:"branch:type"
# fixAdd:"line_management=branch"
# fixRemove:"pole:type"
set_power_pole_type_warning = True
err.append({'class': 9002001, 'subclass': 686268660, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','branch']]),
'-': ([
'branch:type',
'pole:type'])
}})
# node[pole:type=branch][branch:type=cross]
if ('branch:type' in keys and 'pole:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'cross'))
except mapcss.RuleAbort: pass
if match:
# setpower_pole_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=cross"
# fixRemove:"branch:type"
# fixAdd:"line_management=cross"
# fixRemove:"pole:type"
set_power_pole_type_warning = True
err.append({'class': 9002001, 'subclass': 160459065, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','cross']]),
'-': ([
'branch:type',
'pole:type'])
}})
# node[pole:type=termination]
if ('pole:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'termination'))
except mapcss.RuleAbort: pass
if match:
# setpower_pole_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=termination"
# fixAdd:"line_management=termination"
# fixRemove:"pole:type"
set_power_pole_type_warning = True
err.append({'class': 9002001, 'subclass': 1675908395, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','termination']]),
'-': ([
'pole:type'])
}})
# node[pole:type=transition]
if ('pole:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') == mapcss._value_capture(capture_tags, 0, 'transition'))
except mapcss.RuleAbort: pass
if match:
# setpower_pole_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location:transition=yes"
# fixAdd:"location:transition=yes"
# fixRemove:"pole:type"
set_power_pole_type_warning = True
err.append({'class': 9002001, 'subclass': 1266956723, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location:transition','yes']]),
'-': ([
'pole:type'])
}})
# *[pole:type][power][power=~/^(tower|pole|insulator|portal|terminal)$/]!.power_pole_type_warning!.generic_power_tower_type_warning
if ('pole:type' in keys and 'power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_power_pole_type_warning and not set_generic_power_tower_type_warning and mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') and mapcss._tag_capture(capture_tags, 1, tags, 'power') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 2, self.re_24dfeb95), mapcss._tag_capture(capture_tags, 2, tags, 'power')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"line_attachment"
# suggestAlternative:"line_management"
err.append({'class': 9002001, 'subclass': 1513543887, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}'))})
# node[man_made=pipeline_marker]
# node[pipeline=marker]
# node[power=marker]
# node[cable=marker]
if ('cable' in keys) or ('man_made' in keys) or ('pipeline' in keys) or ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'pipeline_marker'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pipeline') == mapcss._value_capture(capture_tags, 0, 'marker'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'marker'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cable') == mapcss._value_capture(capture_tags, 0, 'marker'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"marker=* + utility=*"
err.append({'class': 9002001, 'subclass': 296597752, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sloped_curb=yes][!kerb]
# *[sloped_curb=both][!kerb]
if ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'yes') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'both') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"kerb=lowered"
# fixAdd:"kerb=lowered"
# fixRemove:"sloped_curb"
err.append({'class': 9002001, 'subclass': 1906002413, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['kerb','lowered']]),
'-': ([
'sloped_curb'])
}})
# *[sloped_curb=no][!kerb]
if ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'no') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"kerb=yes"
# fixAdd:"kerb=yes"
# fixRemove:"sloped_curb"
err.append({'class': 9002001, 'subclass': 893727015, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['kerb','yes']]),
'-': ([
'sloped_curb'])
}})
# *[sloped_curb][sloped_curb!~/^(yes|both|no)$/][!kerb]
# *[sloped_curb][kerb]
if ('kerb' in keys and 'sloped_curb' in keys) or ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_01eb1711, '^(yes|both|no)$'), mapcss._tag_capture(capture_tags, 1, tags, 'sloped_curb')) and not mapcss._tag_capture(capture_tags, 2, tags, 'kerb'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') and mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"kerb=*"
err.append({'class': 9002001, 'subclass': 1682376745, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[unnamed=yes]
if ('unnamed' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'unnamed') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"noname=yes"
# fixChangeKey:"unnamed => noname"
err.append({'class': 9002001, 'subclass': 1901447020, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['noname', mapcss.tag(tags, 'unnamed')]]),
'-': ([
'unnamed'])
}})
# node[segregated][segregated!=yes][segregated!=no]
if ('segregated' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'segregated') and mapcss._tag_capture(capture_tags, 1, tags, 'segregated') != mapcss._value_const_capture(capture_tags, 1, 'yes', 'yes') and mapcss._tag_capture(capture_tags, 2, tags, 'segregated') != mapcss._value_const_capture(capture_tags, 2, 'no', 'no'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("unusual value of {0}","{0.key}")
err.append({'class': 9002020, 'subclass': 1015641959, 'text': mapcss.tr('unusual value of {0}', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[building:height]
if ('building:height' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:height'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"height"
# fixChangeKey:"building:height => height"
err.append({'class': 9002001, 'subclass': 1328174745, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['height', mapcss.tag(tags, 'building:height')]]),
'-': ([
'building:height'])
}})
# *[building:min_height]
if ('building:min_height' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:min_height'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"min_height"
# fixChangeKey:"building:min_height => min_height"
err.append({'class': 9002001, 'subclass': 1042683921, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['min_height', mapcss.tag(tags, 'building:min_height')]]),
'-': ([
'building:min_height'])
}})
# *[car][amenity=charging_station]
if ('amenity' in keys and 'car' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'car') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') == mapcss._value_capture(capture_tags, 1, 'charging_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"motorcar"
# fixChangeKey:"car => motorcar"
err.append({'class': 9002001, 'subclass': 1165117414, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['motorcar', mapcss.tag(tags, 'car')]]),
'-': ([
'car'])
}})
# *[navigationaid=approach_light]
# *[navigationaid="ALS (Approach lighting system)"]
if ('navigationaid' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'navigationaid') == mapcss._value_capture(capture_tags, 0, 'approach_light'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'navigationaid') == mapcss._value_capture(capture_tags, 0, 'ALS (Approach lighting system)'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"navigationaid=als"
# fixAdd:"navigationaid=als"
err.append({'class': 9002001, 'subclass': 1577817081, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['navigationaid','als']])
}})
# node[exit_to]
if ('exit_to' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'exit_to'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"destination"
err.append({'class': 9002001, 'subclass': 2117439762, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[water=riverbank][!natural]
if ('water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'riverbank') and not mapcss._tag_capture(capture_tags, 1, tags, 'natural'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=water + water=river"
# fixAdd:"natural=water"
# fixAdd:"water=river"
err.append({'class': 9002001, 'subclass': 186872153, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','water'],
['water','river']])
}})
# *[water=riverbank][natural]
if ('natural' in keys and 'water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'riverbank') and mapcss._tag_capture(capture_tags, 1, tags, 'natural'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=water + water=river"
err.append({'class': 9002001, 'subclass': 630806094, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# node[amenity=bench][capacity][!seats]
if ('amenity' in keys and 'capacity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'bench') and mapcss._tag_capture(capture_tags, 1, tags, 'capacity') and not mapcss._tag_capture(capture_tags, 2, tags, 'seats'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"seats"
# fixChangeKey:"capacity => seats"
err.append({'class': 9002001, 'subclass': 417580324, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['seats', mapcss.tag(tags, 'capacity')]]),
'-': ([
'capacity'])
}})
# node[amenity=bench][capacity][seats]
if ('amenity' in keys and 'capacity' in keys and 'seats' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'bench') and mapcss._tag_capture(capture_tags, 1, tags, 'capacity') and mapcss._tag_capture(capture_tags, 2, tags, 'seats'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"seats"
err.append({'class': 9002001, 'subclass': 2124584560, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=lamps]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'lamps'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=lighting"
# fixAdd:"shop=lighting"
err.append({'class': 9002001, 'subclass': 746886011, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','lighting']])
}})
# *[access=customer]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'customer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"access=customers"
# fixAdd:"access=customers"
err.append({'class': 9002001, 'subclass': 1040065637, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['access','customers']])
}})
# *[addr:inclusion=estimated]
if ('addr:inclusion' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'addr:inclusion') == mapcss._value_capture(capture_tags, 0, 'estimated'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"addr:inclusion=estimate"
# fixAdd:"addr:inclusion=estimate"
err.append({'class': 9002001, 'subclass': 1002643753, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['addr:inclusion','estimate']])
}})
# *[building=apartment]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'apartment'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"building=apartments"
# fixAdd:"building=apartments"
err.append({'class': 9002001, 'subclass': 1384168519, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','apartments']])
}})
# node[lamp_mount="bent mast"]
if ('lamp_mount' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lamp_mount') == mapcss._value_capture(capture_tags, 0, 'bent mast'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"lamp_mount=bent_mast"
# fixAdd:"lamp_mount=bent_mast"
err.append({'class': 9002001, 'subclass': 653926228, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['lamp_mount','bent_mast']])
}})
# node[lamp_mount="straight mast"]
if ('lamp_mount' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lamp_mount') == mapcss._value_capture(capture_tags, 0, 'straight mast'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"lamp_mount=straight_mast"
# fixAdd:"lamp_mount=straight_mast"
err.append({'class': 9002001, 'subclass': 2015439082, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['lamp_mount','straight_mast']])
}})
# node[lamp_type=electrical]
if ('lamp_type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lamp_type') == mapcss._value_capture(capture_tags, 0, 'electrical'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"lamp_type=electric"
# fixAdd:"lamp_type=electric"
err.append({'class': 9002001, 'subclass': 237309553, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['lamp_type','electric']])
}})
# *[generator:type=solar_photovoltaic_panels]
if ('generator:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:type') == mapcss._value_capture(capture_tags, 0, 'solar_photovoltaic_panels'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:type=solar_photovoltaic_panel"
# fixAdd:"generator:type=solar_photovoltaic_panel"
err.append({'class': 9002001, 'subclass': 1146719875, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:type','solar_photovoltaic_panel']])
}})
# *[building=part]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'part'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"building:part=yes"
err.append({'class': 9002001, 'subclass': 455695847, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=sink_hole]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'sink_hole'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=sinkhole"
# fixAdd:"natural=sinkhole"
err.append({'class': 9002001, 'subclass': 1283355945, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','sinkhole']])
}})
# *[climbing:grade:UIAA:min]
if ('climbing:grade:UIAA:min' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:min'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:min"
# fixChangeKey:"climbing:grade:UIAA:min => climbing:grade:uiaa:min"
err.append({'class': 9002001, 'subclass': 1408052420, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:min', mapcss.tag(tags, 'climbing:grade:UIAA:min')]]),
'-': ([
'climbing:grade:UIAA:min'])
}})
# *[climbing:grade:UIAA:max]
if ('climbing:grade:UIAA:max' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:max'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:max"
# fixChangeKey:"climbing:grade:UIAA:max => climbing:grade:uiaa:max"
err.append({'class': 9002001, 'subclass': 1866245426, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:max', mapcss.tag(tags, 'climbing:grade:UIAA:max')]]),
'-': ([
'climbing:grade:UIAA:max'])
}})
# *[climbing:grade:UIAA:mean]
if ('climbing:grade:UIAA:mean' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:mean'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:mean"
# fixChangeKey:"climbing:grade:UIAA:mean => climbing:grade:uiaa:mean"
err.append({'class': 9002001, 'subclass': 1022648087, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:mean', mapcss.tag(tags, 'climbing:grade:UIAA:mean')]]),
'-': ([
'climbing:grade:UIAA:mean'])
}})
# *[climbing:grade:UIAA]
if ('climbing:grade:UIAA' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa"
# fixChangeKey:"climbing:grade:UIAA => climbing:grade:uiaa"
err.append({'class': 9002001, 'subclass': 1007893519, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa', mapcss.tag(tags, 'climbing:grade:UIAA')]]),
'-': ([
'climbing:grade:UIAA'])
}})
# *[cuisine][cuisine=~/^(?i)(bbq)$/]
if ('cuisine' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_2f881233), mapcss._tag_capture(capture_tags, 1, tags, 'cuisine')))
except mapcss.RuleAbort: pass
if match:
# setbbq_autofix
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"cuisine=barbecue"
# fixAdd:"cuisine=barbecue"
set_bbq_autofix = True
err.append({'class': 9002001, 'subclass': 1943338875, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['cuisine','barbecue']])
}})
# *[cuisine=~/(?i)(;bbq|bbq;)/][cuisine!~/(?i)(_bbq)/]
if ('cuisine' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss.regexp_test(mapcss._value_capture(capture_tags, 0, self.re_340a2b31), mapcss._tag_capture(capture_tags, 0, tags, 'cuisine')) and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_7d409ed5, '(?i)(_bbq)'), mapcss._tag_capture(capture_tags, 1, tags, 'cuisine')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","cuisine=bbq")
# suggestAlternative:"cuisine=barbecue"
err.append({'class': 9002001, 'subclass': 1958782130, 'text': mapcss.tr('{0} is deprecated', 'cuisine=bbq')})
# *[Fixme]
if ('Fixme' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'Fixme'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"fixme"
# fixChangeKey:"Fixme => fixme"
err.append({'class': 9002001, 'subclass': 592643943, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['fixme', mapcss.tag(tags, 'Fixme')]]),
'-': ([
'Fixme'])
}})
# *[amenity=embassy]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'embassy'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=diplomatic + diplomatic=embassy"
# fixChangeKey:"amenity => diplomatic"
# fixAdd:"office=diplomatic"
err.append({'class': 9002001, 'subclass': 1751915206, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['diplomatic', mapcss.tag(tags, 'amenity')],
['office','diplomatic']]),
'-': ([
'amenity'])
}})
return err
def way(self, data, tags, nds):
capture_tags = {}
keys = tags.keys()
err = []
set_bbq_autofix = set_diaper___checked = set_diaper_checked = set_generic_power_tower_type_warning = set_power_pole_type_warning = set_power_tower_type_warning = set_samecolor = False
# *[barrier=wire_fence]
if ('barrier' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wire_fence'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=fence + fence_type=chain_link"
# fixAdd:"barrier=fence"
# fixAdd:"fence_type=chain_link"
# assertNoMatch:"way barrier=fence"
# assertMatch:"way barrier=wire_fence"
err.append({'class': 9002001, 'subclass': 1107799632, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','fence'],
['fence_type','chain_link']])
}})
# *[barrier=wood_fence]
if ('barrier' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wood_fence'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=fence + fence_type=wood"
# fixAdd:"barrier=fence"
# fixAdd:"fence_type=wood"
err.append({'class': 9002001, 'subclass': 1412230714, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','fence'],
['fence_type','wood']])
}})
# way[highway=ford]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'ford'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"highway=* + ford=yes"
err.append({'class': 9002001, 'subclass': 591931361, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# way[class]
if ('class' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'class'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"highway"
err.append({'class': 9002001, 'subclass': 905310794, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[highway=stile]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'stile'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=stile"
# fixAdd:"barrier=stile"
# fixRemove:"highway"
err.append({'class': 9002001, 'subclass': 1435678043, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','stile']]),
'-': ([
'highway'])
}})
# *[highway=incline]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'incline'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"incline"
err.append({'class': 9002001, 'subclass': 765169083, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[highway=incline_steep]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'incline_steep'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"incline"
err.append({'class': 9002001, 'subclass': 1966772390, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[highway=unsurfaced]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'unsurfaced'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"highway=* + surface=unpaved"
# fixAdd:"highway=road"
# fixAdd:"surface=unpaved"
err.append({'class': 9002001, 'subclass': 20631498, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['highway','road'],
['surface','unpaved']])
}})
# *[landuse=wood]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'wood'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=forest"
# suggestAlternative:"natural=wood"
err.append({'class': 9002001, 'subclass': 469903103, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=marsh]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'marsh'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=wetland + wetland=marsh"
# fixAdd:"natural=wetland"
# fixAdd:"wetland=marsh"
err.append({'class': 9002001, 'subclass': 1459865523, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','wetland'],
['wetland','marsh']])
}})
# *[highway=byway]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'byway'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
err.append({'class': 9002001, 'subclass': 1844620979, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[power_source]
if ('power_source' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power_source'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"generator:source"
err.append({'class': 9002001, 'subclass': 34751027, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[power_rating]
if ('power_rating' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power_rating'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"generator:output"
err.append({'class': 9002001, 'subclass': 904750343, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[shop=antique]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'antique'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=antiques"
# fixAdd:"shop=antiques"
err.append({'class': 9002001, 'subclass': 596668979, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','antiques']])
}})
# *[shop=bags]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'bags'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=bag"
# fixAdd:"shop=bag"
err.append({'class': 9002001, 'subclass': 1709003584, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','bag']])
}})
# *[shop=fashion]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fashion'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=clothes"
# fixAdd:"shop=clothes"
err.append({'class': 9002001, 'subclass': 985619804, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','clothes']])
}})
# *[shop=organic]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'organic'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=* + organic=only"
# suggestAlternative:"shop=* + organic=yes"
err.append({'class': 9002001, 'subclass': 1959365145, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=pets]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'pets'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=pet"
# fixAdd:"shop=pet"
err.append({'class': 9002001, 'subclass': 290270098, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','pet']])
}})
# *[shop=pharmacy]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'pharmacy'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=pharmacy"
# fixChangeKey:"shop => amenity"
err.append({'class': 9002001, 'subclass': 350722657, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'shop')]]),
'-': ([
'shop'])
}})
# *[bicycle_parking=sheffield]
if ('bicycle_parking' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bicycle_parking') == mapcss._value_capture(capture_tags, 0, 'sheffield'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bicycle_parking=stands"
# fixAdd:"bicycle_parking=stands"
err.append({'class': 9002001, 'subclass': 718874663, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bicycle_parking','stands']])
}})
# *[amenity=emergency_phone]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'emergency_phone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"emergency=phone"
# fixRemove:"amenity"
# fixAdd:"emergency=phone"
err.append({'class': 9002001, 'subclass': 1108230656, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency','phone']]),
'-': ([
'amenity'])
}})
# *[sport=gaelic_football]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'gaelic_football'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=gaelic_games"
# fixAdd:"sport=gaelic_games"
err.append({'class': 9002001, 'subclass': 1768681881, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['sport','gaelic_games']])
}})
# *[power=station]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"power=plant"
# suggestAlternative:"power=substation"
err.append({'class': 9002001, 'subclass': 52025933, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[power=sub_station]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'sub_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"power=substation"
# fixAdd:"power=substation"
err.append({'class': 9002001, 'subclass': 1423074682, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['power','substation']])
}})
# *[location=rooftop]
if ('location' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'location') == mapcss._value_capture(capture_tags, 0, 'rooftop'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location=roof"
# fixAdd:"location=roof"
err.append({'class': 9002001, 'subclass': 1028577225, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location','roof']])
}})
# *[generator:location]
if ('generator:location' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:location'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"location"
# fixChangeKey:"generator:location => location"
err.append({'class': 9002001, 'subclass': 900615917, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location', mapcss.tag(tags, 'generator:location')]]),
'-': ([
'generator:location'])
}})
# *[generator:method=dam]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'dam'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-storage"
# fixAdd:"generator:method=water-storage"
err.append({'class': 9002001, 'subclass': 248819368, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-storage']])
}})
# *[generator:method=pumped-storage]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'pumped-storage'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-pumped-storage"
# fixAdd:"generator:method=water-pumped-storage"
err.append({'class': 9002001, 'subclass': 93454158, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-pumped-storage']])
}})
# *[generator:method=pumping]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'pumping'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-pumped-storage"
# fixAdd:"generator:method=water-pumped-storage"
err.append({'class': 9002001, 'subclass': 2115673716, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-pumped-storage']])
}})
# *[fence_type=chain]
if ('fence_type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fence_type') == mapcss._value_capture(capture_tags, 0, 'chain'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=chain"
# suggestAlternative:"barrier=fence + fence_type=chain_link"
err.append({'class': 9002001, 'subclass': 19409288, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[building=entrance]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'entrance'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"entrance"
err.append({'class': 9002001, 'subclass': 306662985, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[board_type=board]
if ('board_type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'board_type') == mapcss._value_capture(capture_tags, 0, 'board'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixRemove:"board_type"
err.append({'class': 9002001, 'subclass': 1150949316, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'board_type'])
}})
# *[man_made=measurement_station]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'measurement_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=monitoring_station"
# fixAdd:"man_made=monitoring_station"
err.append({'class': 9002001, 'subclass': 700465123, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','monitoring_station']])
}})
# *[measurement=water_level]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'water_level'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:water_level=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:water_level=yes"
err.append({'class': 9002001, 'subclass': 634647702, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:water_level','yes']]),
'-': ([
'measurement'])
}})
# *[measurement=weather]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'weather'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:weather=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:weather=yes"
err.append({'class': 9002001, 'subclass': 336627227, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:weather','yes']]),
'-': ([
'measurement'])
}})
# *[measurement=seismic]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'seismic'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:seismic_activity=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:seismic_activity=yes"
err.append({'class': 9002001, 'subclass': 1402131289, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:seismic_activity','yes']]),
'-': ([
'measurement'])
}})
# *[monitoring:river_level]
if ('monitoring:river_level' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'monitoring:river_level'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"monitoring:water_level"
# fixChangeKey:"monitoring:river_level => monitoring:water_level"
err.append({'class': 9002001, 'subclass': 264907924, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:water_level', mapcss.tag(tags, 'monitoring:river_level')]]),
'-': ([
'monitoring:river_level'])
}})
# *[stay]
if ('stay' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'stay'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"maxstay"
# fixChangeKey:"stay => maxstay"
err.append({'class': 9002001, 'subclass': 787370129, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['maxstay', mapcss.tag(tags, 'stay')]]),
'-': ([
'stay'])
}})
# *[emergency=aed]
if ('emergency' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'emergency') == mapcss._value_capture(capture_tags, 0, 'aed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"emergency=defibrillator"
# fixAdd:"emergency=defibrillator"
err.append({'class': 9002001, 'subclass': 707111885, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency','defibrillator']])
}})
# *[day_on][!restriction]
# *[day_off][!restriction]
# *[date_on][!restriction]
# *[date_off][!restriction]
# *[hour_on][!restriction]
# *[hour_off][!restriction]
if ('date_off' in keys) or ('date_on' in keys) or ('day_off' in keys) or ('day_on' in keys) or ('hour_off' in keys) or ('hour_on' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'day_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'day_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'date_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'date_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hour_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hour_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"*:conditional"
err.append({'class': 9002001, 'subclass': 294264920, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[access=designated]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'designated'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("''{0}'' is meaningless, use more specific tags, e.g. ''{1}''","access=designated","bicycle=designated")
# assertMatch:"way access=designated"
err.append({'class': 9002002, 'subclass': 2057594338, 'text': mapcss.tr('\'\'{0}\'\' is meaningless, use more specific tags, e.g. \'\'{1}\'\'', 'access=designated', 'bicycle=designated')})
# *[access=official]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'official'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("''{0}'' does not specify the official mode of transportation, use ''{1}'' for example","access=official","bicycle=official")
# assertMatch:"way access=official"
err.append({'class': 9002003, 'subclass': 1909133836, 'text': mapcss.tr('\'\'{0}\'\' does not specify the official mode of transportation, use \'\'{1}\'\' for example', 'access=official', 'bicycle=official')})
# *[fixme=yes]
# *[FIXME=yes]
if ('FIXME' in keys) or ('fixme' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fixme') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'FIXME') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0}={1} is unspecific. Instead of ''{1}'' please give more information about what exactly should be fixed.","{0.key}","{0.value}")
# assertMatch:"way fixme=yes"
err.append({'class': 9002004, 'subclass': 136657482, 'text': mapcss.tr('{0}={1} is unspecific. Instead of \'\'{1}\'\' please give more information about what exactly should be fixed.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
# *[name][name=~/^(?i)fixme$/]
if ('name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'name') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_1f92073a), mapcss._tag_capture(capture_tags, 1, tags, 'name')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("Wrong usage of {0} tag. Remove {1}, because it is clear that the name is missing even without an additional tag.","{0.key}","{0.tag}")
# fixRemove:"name"
err.append({'class': 9002005, 'subclass': 642340557, 'text': mapcss.tr('Wrong usage of {0} tag. Remove {1}, because it is clear that the name is missing even without an additional tag.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'name'])
}})
# *[note][note=~/^(?i)fixme$/]
if ('note' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'note') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_1f92073a), mapcss._tag_capture(capture_tags, 1, tags, 'note')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is unspecific. Instead use the key fixme with the information what exactly should be fixed in the value of fixme.","{0.tag}")
err.append({'class': 9002006, 'subclass': 1243120287, 'text': mapcss.tr('{0} is unspecific. Instead use the key fixme with the information what exactly should be fixed in the value of fixme.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[type=broad_leaved]
# *[type=broad_leafed]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'broad_leaved'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'broad_leafed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=broadleaved"
# fixAdd:"leaf_type=broadleaved"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 293968062, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','broadleaved']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[wood=coniferous]
# *[type=coniferous]
# *[type=conifer]
if ('type' in keys) or ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'coniferous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'coniferous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'conifer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=needleleaved"
# fixAdd:"leaf_type=needleleaved"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 50517650, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','needleleaved']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[wood=mixed]
if ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'mixed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=mixed"
# fixAdd:"leaf_type=mixed"
# fixRemove:"wood"
err.append({'class': 9002001, 'subclass': 235914603, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','mixed']]),
'-': ([
'wood'])
}})
# *[wood=evergreen]
# *[type=evergreen]
if ('type' in keys) or ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'evergreen'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'evergreen'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_cycle=evergreen"
# fixAdd:"leaf_cycle=evergreen"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 747964532, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_cycle','evergreen']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[type=deciduous]
# *[type=deciduos]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'deciduous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'deciduos'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_cycle=deciduous"
# fixAdd:"leaf_cycle=deciduous"
# fixRemove:"type"
err.append({'class': 9002001, 'subclass': 591116099, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_cycle','deciduous']]),
'-': ([
'type'])
}})
# *[wood=deciduous]
if ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'deciduous'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type + leaf_cycle"
err.append({'class': 9002001, 'subclass': 1100223594, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# way[type=palm]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'palm'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type"
# suggestAlternative:"species"
# suggestAlternative:"trees"
err.append({'class': 9002001, 'subclass': 1757132153, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=land]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'land'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Please use instead a multipolygon.","{0.tag}")
# assertMatch:"way natural=land"
err.append({'class': 9002001, 'subclass': 94558529, 'text': mapcss.tr('{0} is deprecated. Please use instead a multipolygon.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=causeway]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'causeway'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=low_water_crossing"
# suggestAlternative:"embankment=yes"
# suggestAlternative:"ford=yes"
err.append({'class': 9002001, 'subclass': 461671124, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=swing]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'swing'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge:movable=swing"
# suggestAlternative:"bridge:structure=simple-suspension"
err.append({'class': 9002001, 'subclass': 1047428067, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=suspension]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'suspension'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=yes + bridge:structure=suspension"
# fixAdd:"bridge:structure=suspension"
# fixAdd:"bridge=yes"
err.append({'class': 9002001, 'subclass': 1157046268, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:structure','suspension'],
['bridge','yes']])
}})
# *[bridge=pontoon]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'pontoon'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=yes + bridge:structure=floating"
# fixAdd:"bridge:structure=floating"
# fixAdd:"bridge=yes"
err.append({'class': 9002001, 'subclass': 1195531951, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:structure','floating'],
['bridge','yes']])
}})
# *[fee=interval]
# *[lit=interval]
# *[supervised=interval]
if ('fee' in keys) or ('lit' in keys) or ('supervised' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fee') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lit') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'supervised') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Please specify interval by using opening_hours syntax","{0.tag}")
err.append({'class': 9002001, 'subclass': 417886592, 'text': mapcss.tr('{0} is deprecated. Please specify interval by using opening_hours syntax', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[/josm\/ignore/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_5ee0acf2))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwError:tr("{0} is deprecated. Please delete this object and use a private layer instead","{0.key}")
# fixDeleteObject:this
err.append({'class': 9002001, 'subclass': 1402743016, 'text': mapcss.tr('{0} is deprecated. Please delete this object and use a private layer instead', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[sport=diving]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'diving'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=cliff_diving"
# suggestAlternative:"sport=scuba_diving"
err.append({'class': 9002001, 'subclass': 590643159, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[parking=park_and_ride]
if ('parking' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'parking') == mapcss._value_capture(capture_tags, 0, 'park_and_ride'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=parking + park_ride=yes"
# fixAdd:"amenity=parking"
# fixAdd:"park_ride=yes"
# fixRemove:"parking"
err.append({'class': 9002001, 'subclass': 1893516041, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','parking'],
['park_ride','yes']]),
'-': ([
'parking'])
}})
# *[playground=yes]
# *[manhole=plain]
# *[manhole=unknown]
# *[manhole=yes]
# *[police=yes]
# *[traffic_calming=yes]
# *[access=restricted]
# *[barrier=yes]
# *[aerialway=yes][!public_transport]
# *[amenity=yes]
# *[leisure=yes]
# *[shop="*"]
# *[shop=yes][amenity!=fuel]
# *[craft=yes]
# *[service=yes]
# *[place=yes]
if ('access' in keys) or ('aerialway' in keys) or ('amenity' in keys) or ('barrier' in keys) or ('craft' in keys) or ('leisure' in keys) or ('manhole' in keys) or ('place' in keys) or ('playground' in keys) or ('police' in keys) or ('service' in keys) or ('shop' in keys) or ('traffic_calming' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'playground') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'plain'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'police') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'traffic_calming') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'restricted'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'aerialway') == mapcss._value_capture(capture_tags, 0, 'yes') and not mapcss._tag_capture(capture_tags, 1, tags, 'public_transport'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, '*'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'yes') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') != mapcss._value_const_capture(capture_tags, 1, 'fuel', 'fuel'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'craft') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'service') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0}={1} is unspecific. Please replace ''{1}'' by a specific value.","{0.key}","{0.value}")
err.append({'class': 9002007, 'subclass': 727505823, 'text': mapcss.tr('{0}={1} is unspecific. Please replace \'\'{1}\'\' by a specific value.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
# *[place_name][!name]
if ('place_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place_name') and not mapcss._tag_capture(capture_tags, 1, tags, 'name'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} should be replaced with {1}","{0.key}","{1.key}")
# fixChangeKey:"place_name => name"
err.append({'class': 9002008, 'subclass': 1089331760, 'text': mapcss.tr('{0} should be replaced with {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['name', mapcss.tag(tags, 'place_name')]]),
'-': ([
'place_name'])
}})
# *[place][place_name=*name]
if ('place' in keys and 'place_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place') and mapcss._tag_capture(capture_tags, 1, tags, 'place_name') == mapcss._value_capture(capture_tags, 1, mapcss.tag(tags, 'name')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} = {1}; remove {0}","{1.key}","{1.value}")
# fixRemove:"{1.key}"
err.append({'class': 9002009, 'subclass': 1116761280, 'text': mapcss.tr('{0} = {1}; remove {0}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{1.value}')), 'allow_fix_override': True, 'fix': {
'-': ([
mapcss._tag_uncapture(capture_tags, '{1.key}')])
}})
# way[sidewalk=yes]
if ('sidewalk' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sidewalk') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is unspecific","{0.tag}")
# suggestAlternative:"sidewalk=both"
# suggestAlternative:"sidewalk=left"
# suggestAlternative:"sidewalk=right"
# suggestAlternative:"sidewalk=separate"
err.append({'class': 9002021, 'subclass': 36539821, 'text': mapcss.tr('{0} is unspecific', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[waterway=water_point]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'water_point'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=water_point"
# fixChangeKey:"waterway => amenity"
err.append({'class': 9002001, 'subclass': 103347605, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'waterway')]]),
'-': ([
'waterway'])
}})
# *[waterway=waste_disposal]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'waste_disposal'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=waste_disposal"
# fixChangeKey:"waterway => amenity"
err.append({'class': 9002001, 'subclass': 1963461348, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'waterway')]]),
'-': ([
'waterway'])
}})
# *[waterway=mooring]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'mooring'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"mooring=yes"
# fixAdd:"mooring=yes"
# fixRemove:"waterway"
err.append({'class': 9002001, 'subclass': 81358738, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['mooring','yes']]),
'-': ([
'waterway'])
}})
# *[building][levels]
# *[building:part=yes][levels]
if ('building' in keys and 'levels' in keys) or ('building:part' in keys and 'levels' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') and mapcss._tag_capture(capture_tags, 1, tags, 'levels'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:part') == mapcss._value_capture(capture_tags, 0, 'yes') and mapcss._tag_capture(capture_tags, 1, tags, 'levels'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.key}")
# suggestAlternative:"building:levels"
# fixChangeKey:"levels => building:levels"
err.append({'class': 9002001, 'subclass': 293177436, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building:levels', mapcss.tag(tags, 'levels')]]),
'-': ([
'levels'])
}})
# *[protected_class]
if ('protected_class' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'protected_class'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"protect_class"
# fixChangeKey:"protected_class => protect_class"
err.append({'class': 9002001, 'subclass': 716999373, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['protect_class', mapcss.tag(tags, 'protected_class')]]),
'-': ([
'protected_class'])
}})
# *[kerb=unknown]
# *[lock=unknown]
# *[hide=unknown]
# *[shelter=unknown]
# *[access=unknown]
# *[capacity:parent=unknown]
# *[capacity:women=unknown]
# *[capacity:disabled=unknown]
# *[crossing=unknown]
# *[foot=unknown]
if ('access' in keys) or ('capacity:disabled' in keys) or ('capacity:parent' in keys) or ('capacity:women' in keys) or ('crossing' in keys) or ('foot' in keys) or ('hide' in keys) or ('kerb' in keys) or ('lock' in keys) or ('shelter' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kerb') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lock') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hide') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shelter') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:parent') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:women') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:disabled') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'crossing') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'foot') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("Unspecific tag {0}","{0.tag}")
err.append({'class': 9002010, 'subclass': 1052866123, 'text': mapcss.tr('Unspecific tag {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sport=skiing]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'skiing'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("Definition of {0} is unclear","{0.tag}")
# suggestAlternative:tr("{0} + {1} + {2}","piste:type=*","piste:difficulty=*","piste:grooming=*")
err.append({'class': 9002001, 'subclass': 1578959559, 'text': mapcss.tr('Definition of {0} is unclear', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[waterway=wadi]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'wadi'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=valley"
# suggestAlternative:"{0.key}=* + intermittent=yes"
err.append({'class': 9002001, 'subclass': 719234223, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# way[oneway=1]
if ('oneway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'oneway') == mapcss._value_capture(capture_tags, 0, 1))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"oneway=yes"
# fixAdd:"oneway=yes"
err.append({'class': 9002001, 'subclass': 1628124317, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['oneway','yes']])
}})
# way[oneway=-1]
if ('oneway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'oneway') == mapcss._value_capture(capture_tags, 0, -1))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is not recommended. Use the Reverse Ways function from the Tools menu.","{0.tag}")
err.append({'class': 9002016, 'subclass': 579355135, 'text': mapcss.tr('{0} is not recommended. Use the Reverse Ways function from the Tools menu.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[drinkable]
if ('drinkable' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'drinkable'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"drinking_water"
err.append({'class': 9002001, 'subclass': 1785584789, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[color][!colour]
if ('color' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'color') and not mapcss._tag_capture(capture_tags, 1, tags, 'colour'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"colour"
# fixChangeKey:"color => colour"
# assertNoMatch:"way color=red colour=red"
# assertMatch:"way color=red"
err.append({'class': 9002001, 'subclass': 1850270072, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['colour', mapcss.tag(tags, 'color')]]),
'-': ([
'color'])
}})
# *[color][colour][color=*colour]
if ('color' in keys and 'colour' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'color') and mapcss._tag_capture(capture_tags, 1, tags, 'colour') and mapcss._tag_capture(capture_tags, 2, tags, 'color') == mapcss._value_capture(capture_tags, 2, mapcss.tag(tags, 'colour')))
except mapcss.RuleAbort: pass
if match:
# setsamecolor
# group:tr("deprecated tagging")
# throwWarning:tr("{0} together with {1}","{0.key}","{1.key}")
# suggestAlternative:"colour"
# fixRemove:"color"
# assertNoMatch:"way color=red colour=green"
# assertMatch:"way color=red colour=red"
set_samecolor = True
err.append({'class': 9002001, 'subclass': 1825345743, 'text': mapcss.tr('{0} together with {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
'color'])
}})
# *[color][colour]!.samecolor
if ('color' in keys and 'colour' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_samecolor and mapcss._tag_capture(capture_tags, 0, tags, 'color') and mapcss._tag_capture(capture_tags, 1, tags, 'colour'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} together with {1} and conflicting values","{0.key}","{1.key}")
# suggestAlternative:"colour"
# assertMatch:"way color=red colour=green"
# assertNoMatch:"way color=red colour=red"
err.append({'class': 9002001, 'subclass': 1064658218, 'text': mapcss.tr('{0} together with {1} and conflicting values', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}'))})
# *[building:color][building:colour]!.samebuildingcolor
# Rule Blacklisted
# *[roof:color][roof:colour]!.sameroofcolor
# Rule Blacklisted
# *[/:color/][!building:color][!roof:color][!gpxd:color]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_554de4c7) and not mapcss._tag_capture(capture_tags, 1, tags, 'building:color') and not mapcss._tag_capture(capture_tags, 2, tags, 'roof:color') and not mapcss._tag_capture(capture_tags, 3, tags, 'gpxd:color'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:":colour"
# assertNoMatch:"way color=red"
# assertMatch:"way cycleway:surface:color=grey"
# assertNoMatch:"way roof:color=grey"
err.append({'class': 9002001, 'subclass': 1632389707, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/color:/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_0c5b5730))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"colour:"
# assertMatch:"way color:back=grey"
# assertNoMatch:"way color=red"
err.append({'class': 9002001, 'subclass': 1390370717, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/=|\+|\/|&|<|>|;|'|"|%|#|@|\\|,|\.|\{|\}|\?|\*|\^|\$/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_620f4d52))
except mapcss.RuleAbort: pass
if match:
# group:tr("key with uncommon character")
# throwWarning:tr("{0}","{0.key}")
err.append({'class': 9002011, 'subclass': 1752615188, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/^.$/]
# way[/^..$/][route=ferry][!to]
# way[/^..$/][route!=ferry]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_27210286))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_34c15d62) and mapcss._tag_capture(capture_tags, 1, tags, 'route') == mapcss._value_capture(capture_tags, 1, 'ferry') and not mapcss._tag_capture(capture_tags, 2, tags, 'to'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_34c15d62) and mapcss._tag_capture(capture_tags, 1, tags, 'route') != mapcss._value_const_capture(capture_tags, 1, 'ferry', 'ferry'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("uncommon short key")
# assertNoMatch:"way to=Zuidschermer;Akersloot route=ferry"
# assertMatch:"way to=bar"
err.append({'class': 9002012, 'subclass': 1765060211, 'text': mapcss.tr('uncommon short key')})
# *[sport=hockey]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'hockey'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=field_hockey"
# suggestAlternative:"sport=ice_hockey"
err.append({'class': 9002001, 'subclass': 651933474, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sport=billard]
# *[sport=billards]
# *[sport=billiard]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billard'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billards'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billiard'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=billiards"
# fixAdd:"sport=billiards"
err.append({'class': 9002001, 'subclass': 1522897824, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['sport','billiards']])
}})
# *[payment:credit_cards=yes]
if ('payment:credit_cards' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:credit_cards') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:mastercard=yes","payment:visa=yes")
err.append({'class': 9002013, 'subclass': 705181097, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:mastercard=yes', 'payment:visa=yes')})
# *[payment:debit_cards=yes]
if ('payment:debit_cards' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:debit_cards') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:maestro=yes","payment:girocard=yes")
err.append({'class': 9002013, 'subclass': 679215558, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:maestro=yes', 'payment:girocard=yes')})
# *[payment:electronic_purses=yes]
if ('payment:electronic_purses' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:electronic_purses') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:ep_geldkarte=yes","payment:ep_quick=yes")
err.append({'class': 9002013, 'subclass': 1440457244, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:ep_geldkarte=yes', 'payment:ep_quick=yes')})
# *[payment:cryptocurrencies=yes]
if ('payment:cryptocurrencies' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:cryptocurrencies') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:bitcoin=yes","payment:litecoin=yes")
err.append({'class': 9002013, 'subclass': 1325255949, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:bitcoin=yes', 'payment:litecoin=yes')})
# *[payment:ep_quick]
# *[payment:ep_cash]
# *[payment:ep_proton]
# *[payment:ep_chipknip]
if ('payment:ep_cash' in keys) or ('payment:ep_chipknip' in keys) or ('payment:ep_proton' in keys) or ('payment:ep_quick' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_quick'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_cash'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_proton'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_chipknip'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 332575437, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[kp][railway!=milestone]
if ('kp' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kp') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') != mapcss._value_const_capture(capture_tags, 1, 'milestone', 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"distance"
# fixChangeKey:"kp => distance"
err.append({'class': 9002001, 'subclass': 1256703107, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['distance', mapcss.tag(tags, 'kp')]]),
'-': ([
'kp'])
}})
# *[pk][railway!=milestone]
if ('pk' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pk') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') != mapcss._value_const_capture(capture_tags, 1, 'milestone', 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"distance"
# fixChangeKey:"pk => distance"
err.append({'class': 9002001, 'subclass': 1339969759, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['distance', mapcss.tag(tags, 'pk')]]),
'-': ([
'pk'])
}})
# *[kp][railway=milestone]
if ('kp' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kp') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"railway:position"
# fixChangeKey:"kp => railway:position"
err.append({'class': 9002001, 'subclass': 1667272140, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'kp')]]),
'-': ([
'kp'])
}})
# *[pk][railway=milestone]
if ('pk' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pk') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"railway:position"
# fixChangeKey:"pk => railway:position"
err.append({'class': 9002001, 'subclass': 691355164, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'pk')]]),
'-': ([
'pk'])
}})
# *[distance][railway=milestone]
if ('distance' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'distance') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"railway:position"
# fixChangeKey:"distance => railway:position"
err.append({'class': 9002001, 'subclass': 113691181, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'distance')]]),
'-': ([
'distance'])
}})
# *[postcode]
if ('postcode' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'postcode'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"addr:postcode"
# suggestAlternative:"postal_code"
err.append({'class': 9002001, 'subclass': 1942523538, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[water=intermittent]
if ('water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'intermittent'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"intermittent=yes"
# fixAdd:"intermittent=yes"
# fixRemove:"water"
err.append({'class': 9002001, 'subclass': 813530321, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['intermittent','yes']]),
'-': ([
'water'])
}})
# way[type][type!=waterway][man_made=pipeline]
if ('man_made' in keys and 'type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') and mapcss._tag_capture(capture_tags, 1, tags, 'type') != mapcss._value_const_capture(capture_tags, 1, 'waterway', 'waterway') and mapcss._tag_capture(capture_tags, 2, tags, 'man_made') == mapcss._value_capture(capture_tags, 2, 'pipeline'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"substance"
# fixChangeKey:"type => substance"
err.append({'class': 9002001, 'subclass': 877981524, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['substance', mapcss.tag(tags, 'type')]]),
'-': ([
'type'])
}})
# *[landuse=farm]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'farm'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=farmland"
# suggestAlternative:"landuse=farmyard"
err.append({'class': 9002001, 'subclass': 1968473048, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[seamark=buoy]["seamark:type"=~/^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$/]
if ('seamark' in keys and 'seamark:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'seamark') == mapcss._value_capture(capture_tags, 0, 'buoy') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_61b0be1b), mapcss._tag_capture(capture_tags, 1, tags, 'seamark:type')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"{1.tag}"
# fixRemove:"seamark"
err.append({'class': 9002001, 'subclass': 1224401740, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'seamark'])
}})
# *[seamark=buoy]["seamark:type"!~/^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$/]
if ('seamark' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'seamark') == mapcss._value_capture(capture_tags, 0, 'buoy') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_61b0be1b, '^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$'), mapcss._tag_capture(capture_tags, 1, tags, 'seamark:type')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"{1.tag}"
err.append({'class': 9002001, 'subclass': 1481035998, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[landuse=conservation]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'conservation'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"boundary=protected_area"
# fixAdd:"boundary=protected_area"
# fixRemove:"landuse"
err.append({'class': 9002001, 'subclass': 824801072, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['boundary','protected_area']]),
'-': ([
'landuse'])
}})
# *[amenity=kiosk]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'kiosk'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=kiosk"
# fixChangeKey:"amenity => shop"
err.append({'class': 9002001, 'subclass': 1331930630, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=shop]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'shop'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=*"
err.append({'class': 9002001, 'subclass': 1562207150, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=fishmonger]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fishmonger'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=seafood"
# fixAdd:"shop=seafood"
err.append({'class': 9002001, 'subclass': 1376789416, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','seafood']])
}})
# *[shop=fish]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fish'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=fishing"
# suggestAlternative:"shop=pet"
# suggestAlternative:"shop=seafood"
err.append({'class': 9002001, 'subclass': 47191734, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=betting]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'betting'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=casino"
# suggestAlternative:"amenity=gambling"
# suggestAlternative:"leisure=adult_gaming_centre"
# suggestAlternative:"leisure=amusement_arcade"
# suggestAlternative:"shop=bookmaker"
# suggestAlternative:"shop=lottery"
err.append({'class': 9002001, 'subclass': 1035501389, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=perfume]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'perfume'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=perfumery"
# fixAdd:"shop=perfumery"
err.append({'class': 9002001, 'subclass': 2075099676, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','perfumery']])
}})
# *[amenity=exercise_point]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'exercise_point'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=fitness_station"
# fixRemove:"amenity"
# fixAdd:"leisure=fitness_station"
err.append({'class': 9002001, 'subclass': 1514920202, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure','fitness_station']]),
'-': ([
'amenity'])
}})
# *[shop=auto_parts]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'auto_parts'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=car_parts"
# fixAdd:"shop=car_parts"
err.append({'class': 9002001, 'subclass': 1675828779, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','car_parts']])
}})
# *[amenity=car_repair]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'car_repair'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=car_repair"
# fixChangeKey:"amenity => shop"
err.append({'class': 9002001, 'subclass': 1681273585, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=studio][type=audio]
# *[amenity=studio][type=radio]
# *[amenity=studio][type=television]
# *[amenity=studio][type=video]
if ('amenity' in keys and 'type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'audio'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'radio'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'television'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'video'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"studio"
# fixChangeKey:"type => studio"
err.append({'class': 9002001, 'subclass': 413401822, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['studio', mapcss.tag(tags, 'type')]]),
'-': ([
'type'])
}})
# *[power=cable_distribution_cabinet]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'cable_distribution_cabinet'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=street_cabinet + street_cabinet=*"
# fixAdd:"man_made=street_cabinet"
# fixRemove:"power"
err.append({'class': 9002001, 'subclass': 1007567078, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','street_cabinet']]),
'-': ([
'power'])
}})
# *[power][location=kiosk]
if ('location' in keys and 'power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') and mapcss._tag_capture(capture_tags, 1, tags, 'location') == mapcss._value_capture(capture_tags, 1, 'kiosk'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.tag}")
# fixRemove:"location"
# fixAdd:"man_made=street_cabinet"
# fixAdd:"street_cabinet=power"
err.append({'class': 9002001, 'subclass': 182905067, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','street_cabinet'],
['street_cabinet','power']]),
'-': ([
'location'])
}})
# *[man_made=well]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'well'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=petroleum_well"
# suggestAlternative:"man_made=water_well"
err.append({'class': 9002001, 'subclass': 1740864107, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=dog_bin]
# *[amenity=dog_waste_bin]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'dog_bin'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'dog_waste_bin'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=waste_basket + waste=dog_excrement + vending=excrement_bags"
# fixAdd:"amenity=waste_basket"
# fixAdd:"vending=excrement_bags"
# fixAdd:"waste=dog_excrement"
err.append({'class': 9002001, 'subclass': 2091877281, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','waste_basket'],
['vending','excrement_bags'],
['waste','dog_excrement']])
}})
# *[amenity=artwork]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'artwork'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"tourism=artwork"
# fixRemove:"amenity"
# fixAdd:"tourism=artwork"
err.append({'class': 9002001, 'subclass': 728429076, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tourism','artwork']]),
'-': ([
'amenity'])
}})
# *[amenity=community_center]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'community_center'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=community_centre"
# fixAdd:"amenity=community_centre"
err.append({'class': 9002001, 'subclass': 690512681, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','community_centre']])
}})
# *[man_made=cut_line]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'cut_line'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=cutline"
# fixAdd:"man_made=cutline"
err.append({'class': 9002001, 'subclass': 1008752382, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','cutline']])
}})
# *[amenity=park]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'park'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=park"
# fixRemove:"amenity"
# fixAdd:"leisure=park"
err.append({'class': 9002001, 'subclass': 2085280194, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure','park']]),
'-': ([
'amenity'])
}})
# *[amenity=hotel]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hotel'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"tourism=hotel"
# fixRemove:"amenity"
# fixAdd:"tourism=hotel"
err.append({'class': 9002001, 'subclass': 1341786818, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tourism','hotel']]),
'-': ([
'amenity'])
}})
# *[shop=window]
# *[shop=windows]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'window'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'windows'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=window_construction"
# fixAdd:"craft=window_construction"
# fixRemove:"shop"
err.append({'class': 9002001, 'subclass': 532391183, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['craft','window_construction']]),
'-': ([
'shop'])
}})
# *[amenity=education]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'education'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=college"
# suggestAlternative:"amenity=school"
# suggestAlternative:"amenity=university"
err.append({'class': 9002001, 'subclass': 796960259, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=gallery]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'gallery'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=art"
# fixAdd:"shop=art"
err.append({'class': 9002001, 'subclass': 1319611546, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','art']])
}})
# *[shop=gambling]
# *[leisure=gambling]
if ('leisure' in keys) or ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'gambling'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'gambling'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=casino"
# suggestAlternative:"amenity=gambling"
# suggestAlternative:"leisure=amusement_arcade"
# suggestAlternative:"shop=bookmaker"
# suggestAlternative:"shop=lottery"
err.append({'class': 9002001, 'subclass': 1955724853, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[office=real_estate]
# *[office=real_estate_agent]
if ('office' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'real_estate'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'real_estate_agent'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=estate_agent"
# fixAdd:"office=estate_agent"
err.append({'class': 9002001, 'subclass': 2027311706, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['office','estate_agent']])
}})
# *[shop=glass]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'glass'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=glaziery"
# suggestAlternative:"shop=glaziery"
err.append({'class': 9002001, 'subclass': 712020531, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=proposed]
# *[amenity=disused]
# *[shop=disused]
# *[highway=abandoned]
# *[historic=abandoned]
if ('amenity' in keys) or ('highway' in keys) or ('historic' in keys) or ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'proposed'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'disused'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'disused'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Use the {1}: key prefix instead.","{0.tag}","{0.value}")
err.append({'class': 9002001, 'subclass': 847809313, 'text': mapcss.tr('{0} is deprecated. Use the {1}: key prefix instead.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
# *[amenity=swimming_pool]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'swimming_pool'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=swimming_pool"
# fixChangeKey:"amenity => leisure"
err.append({'class': 9002001, 'subclass': 2012807801, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=sauna]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'sauna'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=sauna"
# fixChangeKey:"amenity => leisure"
err.append({'class': 9002001, 'subclass': 1450116742, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[/^[^t][^i][^g].+_[0-9]$/][!/^note_[0-9]$/][!/^description_[0-9]$/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_300dfa36) and not mapcss._tag_capture(capture_tags, 1, tags, self.re_3185ac6d) and not mapcss._tag_capture(capture_tags, 2, tags, self.re_6d27b157))
except mapcss.RuleAbort: pass
if match:
# group:tr("questionable key (ending with a number)")
# throwWarning:tr("{0}","{0.key}")
# assertNoMatch:"way description_3=foo"
# assertMatch:"way name_1=foo"
# assertNoMatch:"way note_2=foo"
# assertNoMatch:"way tiger:name_base_1=bar"
err.append({'class': 9002014, 'subclass': 2081989305, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[sport=skating]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'skating'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=ice_skating"
# suggestAlternative:"sport=roller_skating"
err.append({'class': 9002001, 'subclass': 170699177, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# way[barrier=wall][type=noise_barrier][!wall]
# way[barrier=wall][type=noise_barrier][wall=noise_barrier]
if ('barrier' in keys and 'type' in keys) or ('barrier' in keys and 'type' in keys and 'wall' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wall') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'noise_barrier') and not mapcss._tag_capture(capture_tags, 2, tags, 'wall'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wall') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'noise_barrier') and mapcss._tag_capture(capture_tags, 2, tags, 'wall') == mapcss._value_capture(capture_tags, 2, 'noise_barrier'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.tag}")
# suggestAlternative:"wall=noise_barrier"
# fixChangeKey:"type => wall"
err.append({'class': 9002001, 'subclass': 1513752031, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['wall', mapcss.tag(tags, 'type')]]),
'-': ([
'type'])
}})
# way[barrier=wall][type=noise_barrier][wall][wall!=noise_barrier]
if ('barrier' in keys and 'type' in keys and 'wall' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wall') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'noise_barrier') and mapcss._tag_capture(capture_tags, 2, tags, 'wall') and mapcss._tag_capture(capture_tags, 3, tags, 'wall') != mapcss._value_const_capture(capture_tags, 3, 'noise_barrier', 'noise_barrier'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.tag}")
# suggestAlternative:"wall=noise_barrier"
err.append({'class': 9002001, 'subclass': 2130256462, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.tag}'))})
# *[amenity=public_building]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'public_building'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"..."
# suggestAlternative:"amenity=community_centre"
# suggestAlternative:"amenity=hospital"
# suggestAlternative:"amenity=townhall"
# suggestAlternative:"building=hospital"
# suggestAlternative:"building=public"
# suggestAlternative:"leisure=sports_centre"
# suggestAlternative:"office=government"
err.append({'class': 9002001, 'subclass': 1295642010, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[office=administrative]
if ('office' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'administrative'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=government"
# fixAdd:"office=government"
err.append({'class': 9002001, 'subclass': 213844674, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['office','government']])
}})
# *[vending=news_papers]
if ('vending' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'news_papers'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"vending=newspapers"
# fixAdd:"vending=newspapers"
err.append({'class': 9002001, 'subclass': 1133820292, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['vending','newspapers']])
}})
# *[service=drive_through]
if ('service' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'service') == mapcss._value_capture(capture_tags, 0, 'drive_through'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"service=drive-through"
# fixAdd:"service=drive-through"
err.append({'class': 9002001, 'subclass': 283545650, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['service','drive-through']])
}})
# *[noexit][noexit!=yes][noexit!=no]
# way[highway=service][service][service!~/^(alley|drive-through|drive_through|driveway|emergency_access|parking_aisle|rest_area|slipway|yes)$/]
# way[railway=rail][service][service!~/^(crossover|siding|spur|yard)$/]
# way[waterway=canal][service][service!~/^(irrigation|transportation|water_power)$/]
if ('highway' in keys and 'service' in keys) or ('noexit' in keys) or ('railway' in keys and 'service' in keys) or ('service' in keys and 'waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') and mapcss._tag_capture(capture_tags, 1, tags, 'noexit') != mapcss._value_const_capture(capture_tags, 1, 'yes', 'yes') and mapcss._tag_capture(capture_tags, 2, tags, 'noexit') != mapcss._value_const_capture(capture_tags, 2, 'no', 'no'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'service') and mapcss._tag_capture(capture_tags, 1, tags, 'service') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 2, self.re_51df498f, '^(alley|drive-through|drive_through|driveway|emergency_access|parking_aisle|rest_area|slipway|yes)$'), mapcss._tag_capture(capture_tags, 2, tags, 'service')))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'railway') == mapcss._value_capture(capture_tags, 0, 'rail') and mapcss._tag_capture(capture_tags, 1, tags, 'service') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 2, self.re_2fd4cdcf, '^(crossover|siding|spur|yard)$'), mapcss._tag_capture(capture_tags, 2, tags, 'service')))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'canal') and mapcss._tag_capture(capture_tags, 1, tags, 'service') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 2, self.re_7a045a17, '^(irrigation|transportation|water_power)$'), mapcss._tag_capture(capture_tags, 2, tags, 'service')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("The key {0} has an uncommon value.","{1.key}")
err.append({'class': 9002017, 'subclass': 806344140, 'text': mapcss.tr('The key {0} has an uncommon value.', mapcss._tag_uncapture(capture_tags, '{1.key}'))})
# *[name:botanical]
if ('name:botanical' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'name:botanical'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"species"
err.append({'class': 9002001, 'subclass': 1061429000, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[shop=souvenir]
# *[shop=souvenirs]
# *[shop=souveniers]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souvenir'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souvenirs'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souveniers'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=gift"
# fixAdd:"shop=gift"
err.append({'class': 9002001, 'subclass': 1794702946, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','gift']])
}})
# *[vending=animal_food]
if ('vending' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'animal_food'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"vending=animal_feed"
# fixAdd:"vending=animal_feed"
err.append({'class': 9002001, 'subclass': 1077411296, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['vending','animal_feed']])
}})
# way[highway=emergency_access_point][phone][!emergency_telephone_code]
if ('highway' in keys and 'phone' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'emergency_access_point') and mapcss._tag_capture(capture_tags, 1, tags, 'phone') and not mapcss._tag_capture(capture_tags, 2, tags, 'emergency_telephone_code'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"emergency_telephone_code"
# fixChangeKey:"phone => emergency_telephone_code"
err.append({'class': 9002001, 'subclass': 904792316, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency_telephone_code', mapcss.tag(tags, 'phone')]]),
'-': ([
'phone'])
}})
# way[highway=emergency_access_point][phone=*emergency_telephone_code]
if ('highway' in keys and 'phone' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'emergency_access_point') and mapcss._tag_capture(capture_tags, 1, tags, 'phone') == mapcss._value_capture(capture_tags, 1, mapcss.tag(tags, 'emergency_telephone_code')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"emergency_telephone_code"
# fixRemove:"phone"
err.append({'class': 9002001, 'subclass': 3132845, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'phone'])
}})
# way[highway=emergency_access_point][phone][emergency_telephone_code][phone!=*emergency_telephone_code]
if ('emergency_telephone_code' in keys and 'highway' in keys and 'phone' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'emergency_access_point') and mapcss._tag_capture(capture_tags, 1, tags, 'phone') and mapcss._tag_capture(capture_tags, 2, tags, 'emergency_telephone_code') and mapcss._tag_capture(capture_tags, 3, tags, 'phone') != mapcss._value_capture(capture_tags, 3, mapcss.tag(tags, 'emergency_telephone_code')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"emergency_telephone_code"
err.append({'class': 9002001, 'subclass': 144379729, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# way[tracktype=1]
if ('tracktype' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tracktype') == mapcss._value_capture(capture_tags, 0, 1))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"tracktype=grade1"
# fixAdd:"tracktype=grade1"
err.append({'class': 9002018, 'subclass': 823078782, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tracktype','grade1']])
}})
# way[tracktype=2]
if ('tracktype' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tracktype') == mapcss._value_capture(capture_tags, 0, 2))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"tracktype=grade2"
# fixAdd:"tracktype=grade2"
err.append({'class': 9002018, 'subclass': 652259155, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tracktype','grade2']])
}})
# way[tracktype=3]
if ('tracktype' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tracktype') == mapcss._value_capture(capture_tags, 0, 3))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"tracktype=grade3"
# fixAdd:"tracktype=grade3"
err.append({'class': 9002018, 'subclass': 1624412111, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tracktype','grade3']])
}})
# way[tracktype=4]
if ('tracktype' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tracktype') == mapcss._value_capture(capture_tags, 0, 4))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"tracktype=grade4"
# fixAdd:"tracktype=grade4"
err.append({'class': 9002018, 'subclass': 808384986, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tracktype','grade4']])
}})
# way[tracktype=5]
if ('tracktype' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tracktype') == mapcss._value_capture(capture_tags, 0, 5))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"tracktype=grade5"
# fixAdd:"tracktype=grade5"
err.append({'class': 9002018, 'subclass': 1050276122, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tracktype','grade5']])
}})
# way[tracktype][tracktype!~/^(1|2|3|4|5|grade1|grade2|grade3|grade4|grade5)$/]
if ('tracktype' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tracktype') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_047d5648, '^(1|2|3|4|5|grade1|grade2|grade3|grade4|grade5)$'), mapcss._tag_capture(capture_tags, 1, tags, 'tracktype')))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("wrong value: {0}","{0.tag}")
# suggestAlternative:"tracktype=grade1"
# suggestAlternative:"tracktype=grade2"
# suggestAlternative:"tracktype=grade3"
# suggestAlternative:"tracktype=grade4"
# suggestAlternative:"tracktype=grade5"
err.append({'class': 9002019, 'subclass': 1665196665, 'text': mapcss.tr('wrong value: {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=hunting_stand][lock=yes]
# *[amenity=hunting_stand][lock=no]
if ('amenity' in keys and 'lock' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hunting_stand') and mapcss._tag_capture(capture_tags, 1, tags, 'lock') == mapcss._value_capture(capture_tags, 1, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hunting_stand') and mapcss._tag_capture(capture_tags, 1, tags, 'lock') == mapcss._value_capture(capture_tags, 1, 'no'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"lockable"
# fixChangeKey:"lock => lockable"
err.append({'class': 9002001, 'subclass': 1939599742, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['lockable', mapcss.tag(tags, 'lock')]]),
'-': ([
'lock'])
}})
# *[amenity=advertising][!advertising]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'advertising') and not mapcss._tag_capture(capture_tags, 1, tags, 'advertising'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"advertising=*"
err.append({'class': 9002001, 'subclass': 1696784412, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # --- Auto-generated MapCSS deprecation checks (do not hand-edit logic; fix the
        # source .mapcss and regenerate). Per rule: the `if (<key> in keys)` guard is a
        # cheap pre-filter; each `if not match:` branch tests one selector alternative
        # (logical OR), resetting `capture_tags` so the `{0.tag}`/`{0.key}` placeholders
        # capture per-alternative; mapcss.RuleAbort aborts one alternative, not the rule.
        # *[amenity=advertising][advertising]
        if ('advertising' in keys and 'amenity' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'advertising') and mapcss._tag_capture(capture_tags, 1, tags, 'advertising'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"advertising=*"
                # fixRemove:"amenity"
                # Auto-fix: drop the redundant amenity key, keep advertising=*.
                err.append({'class': 9002001, 'subclass': 1538706366, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '-': ([
                        'amenity'])
                }})
        # way[direction=up][incline=up]
        # way[direction=down][incline=down]
        # way[direction=up][!incline]
        # way[direction=down][!incline]
        # NOTE(review): second disjunct of this guard is redundant ('direction' in keys
        # already implies it) — harmless generator artifact.
        if ('direction' in keys) or ('direction' in keys and 'incline' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'direction') == mapcss._value_capture(capture_tags, 0, 'up') and mapcss._tag_capture(capture_tags, 1, tags, 'incline') == mapcss._value_capture(capture_tags, 1, 'up'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'direction') == mapcss._value_capture(capture_tags, 0, 'down') and mapcss._tag_capture(capture_tags, 1, tags, 'incline') == mapcss._value_capture(capture_tags, 1, 'down'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'direction') == mapcss._value_capture(capture_tags, 0, 'up') and not mapcss._tag_capture(capture_tags, 1, tags, 'incline'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'direction') == mapcss._value_capture(capture_tags, 0, 'down') and not mapcss._tag_capture(capture_tags, 1, tags, 'incline'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"incline"
                # fixChangeKey:"direction => incline"
                # Auto-fix: rename the key, copying the current direction value to incline.
                err.append({'class': 9002001, 'subclass': 1707030473, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['incline', mapcss.tag(tags, 'direction')]]),
                    '-': ([
                        'direction'])
                }})
        # way[direction=up][incline][incline!=up]
        # way[direction=down][incline][incline!=down]
        if ('direction' in keys and 'incline' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'direction') == mapcss._value_capture(capture_tags, 0, 'up') and mapcss._tag_capture(capture_tags, 1, tags, 'incline') and mapcss._tag_capture(capture_tags, 2, tags, 'incline') != mapcss._value_const_capture(capture_tags, 2, 'up', 'up'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'direction') == mapcss._value_capture(capture_tags, 0, 'down') and mapcss._tag_capture(capture_tags, 1, tags, 'incline') and mapcss._tag_capture(capture_tags, 2, tags, 'incline') != mapcss._value_const_capture(capture_tags, 2, 'down', 'down'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"incline"
                # No auto-fix here: direction and incline disagree, so removal would lose data.
                err.append({'class': 9002001, 'subclass': 937812227, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # --- Generated checks for bogus `building` values (see block pattern notes in
        # the generator output): truthy misspellings get auto-fixed to building=yes;
        # lifecycle/unknown values only warn. ---
        # *[building=true]
        # *[building="*"]
        # *[building=Y]
        # *[building=y]
        # *[building=1]
        if ('building' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'true'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, '*'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'Y'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'y'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                # NOTE(review): value is the int literal 1 while OSM tag values are
                # strings — confirm mapcss._value_capture/_tag_capture coerce numerics,
                # otherwise 'building=1' never matches this alternative.
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 1))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("misspelled value")
                # throwError:tr("{0}","{0.tag}")
                # suggestAlternative:"building=yes"
                # fixAdd:"building=yes"
                err.append({'class': 9002018, 'subclass': 596818855, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['building','yes']])
                }})
        # *[building=abandoned]
        # *[building=address]
        # *[building=bing]
        # *[building=collapsed]
        # *[building=damaged]
        # *[building=demolished]
        # *[building=disused]
        # *[building=fixme]
        # *[building=occupied]
        # *[building=razed]
        if ('building' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'address'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'bing'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'collapsed'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'damaged'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'demolished'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'disused'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'fixme'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'occupied'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'razed'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is not a building type.","{0.tag}")
                # Warning only — these are lifecycle/source notes, so no safe auto-fix.
                err.append({'class': 9002001, 'subclass': 938825828, 'text': mapcss.tr('{0} is not a building type.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[building=other]
        # *[building=unclassified]
        # *[building=undefined]
        # *[building=unknown]
        # *[building=unidentified]
        if ('building' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'other'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unclassified'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'undefined'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unknown'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unidentified'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is not a building type.","{0.tag}")
                # fixAdd:"building=yes"
                err.append({'class': 9002001, 'subclass': 48721080, 'text': mapcss.tr('{0} is not a building type.', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['building','yes']])
                }})
        # --- Generated deprecation checks: water=salt*/tidal, amenity=toilet typo,
        # power=busbar. `'fix'` dicts use '+' for tags to add and '-' for tags to
        # remove; rules without a 'fix' entry are warn-only. ---
        # way[water=salt]
        # way[water=salt_pool]
        # way[water=salt_panne]
        # way[water=salt_pond]
        if ('water' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'salt'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'salt_pool'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'salt_panne'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'salt_pond'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"landuse=salt_pond"
                # suggestAlternative:"salt=yes"
                err.append({'class': 9002001, 'subclass': 403932956, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # way[water=tidal]
        if ('water' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'tidal'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"tidal=yes"
                # fixAdd:"tidal=yes"
                # fixRemove:"water"
                err.append({'class': 9002001, 'subclass': 1201030806, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['tidal','yes']]),
                    '-': ([
                        'water'])
                }})
        # *[amenity=toilet]
        if ('amenity' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'toilet'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("misspelled value")
                # throwError:tr("{0}","{0.tag}")
                # suggestAlternative:"amenity=toilets"
                # fixAdd:"amenity=toilets"
                err.append({'class': 9002018, 'subclass': 440018606, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['amenity','toilets']])
                }})
        # way[power=busbar]
        if ('power' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'busbar'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"power=line + line=busbar"
                # fixAdd:"line=busbar"
                # fixAdd:"power=line"
                err.append({'class': 9002001, 'subclass': 2001565557, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['line','busbar'],
                        ['power','line']])
                }})
        # --- Generated deprecation checks: telephone-exchange tagging migrations,
        # natural=waterfall key rename, religion and shop value replacements. ---
        # *[man_made=MDF]
        # *[man_made=telephone_exchange]
        if ('man_made' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'MDF'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'telephone_exchange'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"telecom=exchange"
                # fixRemove:"man_made"
                # fixAdd:"telecom=exchange"
                err.append({'class': 9002001, 'subclass': 634698090, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['telecom','exchange']]),
                    '-': ([
                        'man_made'])
                }})
        # *[building=central_office]
        if ('building' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'central_office'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"telecom=exchange"
                # fixAdd:"building=yes"
                # fixAdd:"telecom=exchange"
                err.append({'class': 9002001, 'subclass': 1091970270, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['building','yes'],
                        ['telecom','exchange']])
                }})
        # *[telecom=central_office]
        if ('telecom' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'telecom') == mapcss._value_capture(capture_tags, 0, 'central_office'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"telecom=exchange"
                # fixAdd:"telecom=exchange"
                err.append({'class': 9002001, 'subclass': 1503278830, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['telecom','exchange']])
                }})
        # *[natural=waterfall]
        if ('natural' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'waterfall'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"waterway=waterfall"
                # fixChangeKey:"natural => waterway"
                # Key rename: the current natural value is copied into waterway.
                err.append({'class': 9002001, 'subclass': 764711734, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['waterway', mapcss.tag(tags, 'natural')]]),
                    '-': ([
                        'natural'])
                }})
        # *[religion=unitarian]
        if ('religion' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'religion') == mapcss._value_capture(capture_tags, 0, 'unitarian'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"religion=unitarian_universalist"
                # fixAdd:"religion=unitarian_universalist"
                err.append({'class': 9002001, 'subclass': 9227331, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['religion','unitarian_universalist']])
                }})
        # *[shop=shopping_centre]
        if ('shop' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'shopping_centre'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"shop=mall"
                # fixAdd:"shop=mall"
                err.append({'class': 9002001, 'subclass': 1448390566, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['shop','mall']])
                }})
        # --- Generated deprecation checks: is_in*, sport=football, leisure=common,
        # cuisine vegan/vegetarian, kitchen_hours, money_transfer, contact:google_plus,
        # amenity=garage(s). ---
        # *[is_in]
        # way[/^is_in:.*$/]
        # Unconditional (`if True:`) because the second selector is a key regex and
        # cannot be pre-filtered via `keys` membership.
        if True:
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'is_in'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                # self.re_493fd1a6: precompiled key pattern, presumably /^is_in:.*$/
                # per the selector comment above — defined elsewhere on the class.
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_493fd1a6))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # fixRemove:"{0.key}"
                # The removed key is resolved from the capture, so whichever
                # alternative matched (is_in or is_in:*) is the one dropped.
                err.append({'class': 9002001, 'subclass': 1865068642, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
                    '-': ([
                        mapcss._tag_uncapture(capture_tags, '{0.key}')])
                }})
        # *[sport=football]
        if ('sport' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'football'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"sport=american_football"
                # suggestAlternative:"sport=australian_football"
                # suggestAlternative:"sport=canadian_football"
                # suggestAlternative:"sport=gaelic_games"
                # suggestAlternative:"sport=rugby_league"
                # suggestAlternative:"sport=rugby_union"
                # suggestAlternative:"sport=soccer"
                # Ambiguous value — warn only, a human must pick the right code.
                err.append({'class': 9002001, 'subclass': 73038577, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[leisure=common]
        if ('leisure' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'common'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"designation=common"
                # suggestAlternative:"landuse=*"
                # suggestAlternative:"leisure=*"
                err.append({'class': 9002001, 'subclass': 157636301, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[cuisine=vegan]
        # *[cuisine=vegetarian]
        if ('cuisine' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') == mapcss._value_capture(capture_tags, 0, 'vegan'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') == mapcss._value_capture(capture_tags, 0, 'vegetarian'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # suggestAlternative:concat("diet:","{0.value}","=only")
                # suggestAlternative:concat("diet:","{0.value}","=yes")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                err.append({'class': 9002001, 'subclass': 43604574, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[kitchen_hours]
        if ('kitchen_hours' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kitchen_hours'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"opening_hours:kitchen"
                # fixChangeKey:"kitchen_hours => opening_hours:kitchen"
                err.append({'class': 9002001, 'subclass': 1088306802, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['opening_hours:kitchen', mapcss.tag(tags, 'kitchen_hours')]]),
                    '-': ([
                        'kitchen_hours'])
                }})
        # *[shop=money_transfer]
        if ('shop' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'money_transfer'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"amenity=money_transfer"
                # fixChangeKey:"shop => amenity"
                err.append({'class': 9002001, 'subclass': 1664997936, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['amenity', mapcss.tag(tags, 'shop')]]),
                    '-': ([
                        'shop'])
                }})
        # *[contact:google_plus]
        if ('contact:google_plus' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'contact:google_plus'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # fixRemove:"contact:google_plus"
                err.append({'class': 9002001, 'subclass': 1869461154, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
                    '-': ([
                        'contact:google_plus'])
                }})
        # *[amenity=garages]
        # *[amenity=garage]
        if ('amenity' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'garages'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'garage'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # suggestAlternative:concat("building=","{0.value}")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"amenity=parking + parking=garage_boxes"
                # suggestAlternative:"landuse=garages"
                err.append({'class': 9002001, 'subclass': 863228118, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # --- Generated deprecation checks: winery, youth_centre, building:type
        # migration (fixable only when building is absent or 'yes'), escalator,
        # fenced, historic_name (fixable only when old_name is absent). ---
        # *[shop=winery]
        # *[amenity=winery]
        if ('amenity' in keys) or ('shop' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'winery'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'winery'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"craft=winery"
                # suggestAlternative:"shop=wine"
                err.append({'class': 9002001, 'subclass': 1773574987, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[amenity=youth_centre]
        if ('amenity' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'youth_centre'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"amenity=community_centre + community_centre=youth_centre"
                # fixAdd:"amenity=community_centre"
                # fixAdd:"community_centre=youth_centre"
                err.append({'class': 9002001, 'subclass': 1284929085, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['amenity','community_centre'],
                        ['community_centre','youth_centre']])
                }})
        # *[building:type][building=yes]
        # *[building:type][!building]
        if ('building' in keys and 'building:type' in keys) or ('building:type' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and mapcss._tag_capture(capture_tags, 1, tags, 'building') == mapcss._value_capture(capture_tags, 1, 'yes'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and not mapcss._tag_capture(capture_tags, 1, tags, 'building'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"building"
                # fixChangeKey:"building:type => building"
                # assertNoMatch:"way building:type=church building=supermarket"
                # assertMatch:"way building:type=church building=yes"
                # assertMatch:"way building:type=church"
                err.append({'class': 9002001, 'subclass': 1927794430, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['building', mapcss.tag(tags, 'building:type')]]),
                    '-': ([
                        'building:type'])
                }})
        # *[building:type][building][building!=yes]
        if ('building' in keys and 'building:type' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and mapcss._tag_capture(capture_tags, 1, tags, 'building') and mapcss._tag_capture(capture_tags, 2, tags, 'building') != mapcss._value_const_capture(capture_tags, 2, 'yes', 'yes'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"building"
                # assertMatch:"way building:type=church building=supermarket"
                # assertNoMatch:"way building:type=church building=yes"
                # assertNoMatch:"way building:type=church"
                # Warn only: a specific building value already exists, so the key
                # rename above would overwrite it.
                err.append({'class': 9002001, 'subclass': 1133239698, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
        # *[escalator]
        if ('escalator' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'escalator'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"highway=steps + conveying=*"
                err.append({'class': 9002001, 'subclass': 967271828, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
        # *[fenced]
        if ('fenced' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fenced'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"barrier=fence"
                err.append({'class': 9002001, 'subclass': 1141285220, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
        # *[historic_name][!old_name]
        if ('historic_name' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic_name') and not mapcss._tag_capture(capture_tags, 1, tags, 'old_name'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"old_name"
                # fixChangeKey:"historic_name => old_name"
                err.append({'class': 9002001, 'subclass': 1034538127, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['old_name', mapcss.tag(tags, 'historic_name')]]),
                    '-': ([
                        'historic_name'])
                }})
        # *[historic_name][old_name]
        if ('historic_name' in keys and 'old_name' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic_name') and mapcss._tag_capture(capture_tags, 1, tags, 'old_name'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"old_name"
                # Warn only: old_name already present, auto-rename would clobber it.
                err.append({'class': 9002001, 'subclass': 30762614, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
        # --- Generated deprecation checks: landuse=field, leisure beach/club/
        # video_arcade, man_made jetty/village_pump/water_tank, natural=moor. ---
        # *[landuse=field]
        if ('landuse' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'field'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"landuse=farmland"
                # fixAdd:"landuse=farmland"
                err.append({'class': 9002001, 'subclass': 426261497, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['landuse','farmland']])
                }})
        # *[leisure=beach]
        if ('leisure' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'beach'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"leisure=beach_resort"
                # suggestAlternative:"natural=beach"
                err.append({'class': 9002001, 'subclass': 1767286055, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[leisure=club]
        if ('leisure' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'club'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"club=*"
                err.append({'class': 9002001, 'subclass': 1282397509, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[leisure=video_arcade]
        if ('leisure' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'video_arcade'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"leisure=adult_gaming_centre"
                # suggestAlternative:"leisure=amusement_arcade"
                err.append({'class': 9002001, 'subclass': 1463909830, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[man_made=jetty]
        if ('man_made' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'jetty'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"man_made=pier"
                # fixAdd:"man_made=pier"
                err.append({'class': 9002001, 'subclass': 192707176, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['man_made','pier']])
                }})
        # *[man_made=village_pump]
        if ('man_made' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'village_pump'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"man_made=water_well"
                # fixAdd:"man_made=water_well"
                err.append({'class': 9002001, 'subclass': 423232686, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['man_made','water_well']])
                }})
        # *[man_made=water_tank]
        if ('man_made' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'water_tank'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"man_made=storage_tank + content=water"
                # fixAdd:"content=water"
                # fixAdd:"man_made=storage_tank"
                err.append({'class': 9002001, 'subclass': 563629665, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['content','water'],
                        ['man_made','storage_tank']])
                }})
        # *[natural=moor]
        if ('natural' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'moor'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"landuse=meadow + meadow=agricultural"
                # suggestAlternative:"natural=fell"
                # suggestAlternative:"natural=grassland"
                # suggestAlternative:"natural=heath"
                # suggestAlternative:"natural=scrub"
                # suggestAlternative:"natural=tundra"
                # suggestAlternative:"natural=wetland"
                # Ambiguous replacement — warn only.
                err.append({'class': 9002001, 'subclass': 374637717, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # --- Generated deprecation checks: noexit=no (fixable only when fixme is
        # absent), shop dive/furnace, sport=paragliding, tourism=bed_and_breakfast. ---
        # *[noexit=no][!fixme]
        if ('noexit' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') == mapcss._value_capture(capture_tags, 0, 'no') and not mapcss._tag_capture(capture_tags, 1, tags, 'fixme'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"fixme=continue"
                # fixAdd:"fixme=continue"
                # fixRemove:"noexit"
                err.append({'class': 9002001, 'subclass': 647435126, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['fixme','continue']]),
                    '-': ([
                        'noexit'])
                }})
        # *[noexit=no][fixme]
        if ('fixme' in keys and 'noexit' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') == mapcss._value_capture(capture_tags, 0, 'no') and mapcss._tag_capture(capture_tags, 1, tags, 'fixme'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"fixme=continue"
                # Warn only: an existing fixme value would be overwritten by the fix.
                err.append({'class': 9002001, 'subclass': 881828009, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[shop=dive]
        if ('shop' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'dive'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"shop=scuba_diving"
                # fixAdd:"shop=scuba_diving"
                err.append({'class': 9002001, 'subclass': 1582968978, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['shop','scuba_diving']])
                }})
        # *[shop=furnace]
        if ('shop' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'furnace'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"craft=plumber"
                # suggestAlternative:"shop=fireplace"
                err.append({'class': 9002001, 'subclass': 1155821104, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[sport=paragliding]
        if ('sport' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'paragliding'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"sport=free_flying"
                # fixAdd:"sport=free_flying"
                err.append({'class': 9002001, 'subclass': 1531788430, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['sport','free_flying']])
                }})
        # *[tourism=bed_and_breakfast]
        if ('tourism' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tourism') == mapcss._value_capture(capture_tags, 0, 'bed_and_breakfast'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.tag}")
                # suggestAlternative:"tourism=guest_house + guest_house=bed_and_breakfast"
                # fixAdd:"guest_house=bed_and_breakfast"
                # fixAdd:"tourism=guest_house"
                err.append({'class': 9002001, 'subclass': 954237438, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['guest_house','bed_and_breakfast'],
                        ['tourism','guest_house']])
                }})
# *[diaper=yes]
# *[diaper=no]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper => changing_table"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 1957125311, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table', mapcss.tag(tags, 'diaper')]]),
'-': ([
'diaper'])
}})
# *[diaper][diaper=~/^[1-9][0-9]*$/]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_0f294fdf), mapcss._tag_capture(capture_tags, 1, tags, 'diaper')))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table=yes + changing_table:count=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixAdd:"changing_table=yes"
# fixChangeKey:"diaper => changing_table:count"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 2105051472, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table','yes'],
['changing_table:count', mapcss.tag(tags, 'diaper')]]),
'-': ([
'diaper'])
}})
# *[diaper=room]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'room'))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table=dedicated_room"
# suggestAlternative:"changing_table=room"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 883202329, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[diaper]!.diaper_checked
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_diaper_checked and mapcss._tag_capture(capture_tags, 0, tags, 'diaper'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"changing_table"
err.append({'class': 9002001, 'subclass': 693675339, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[diaper:male=yes]
if ('diaper:male' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:male') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=male_toilet"
# fixAdd:"changing_table:location=male_toilet"
# fixRemove:"diaper:male"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 799035479, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','male_toilet']]),
'-': ([
'diaper:male'])
}})
# *[diaper:female=yes]
if ('diaper:female' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:female') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=female_toilet"
# fixAdd:"changing_table:location=female_toilet"
# fixRemove:"diaper:female"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1450901137, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','female_toilet']]),
'-': ([
'diaper:female'])
}})
# *[diaper:unisex=yes]
if ('diaper:unisex' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:unisex') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=unisex_toilet"
# fixAdd:"changing_table:location=unisex_toilet"
# fixRemove:"diaper:unisex"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1460378712, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','unisex_toilet']]),
'-': ([
'diaper:unisex'])
}})
# *[diaper:wheelchair=yes]
# *[diaper:wheelchair=no]
if ('diaper:wheelchair' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:wheelchair') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:wheelchair') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table:wheelchair=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper:wheelchair => changing_table:wheelchair"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1951967281, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:wheelchair', mapcss.tag(tags, 'diaper:wheelchair')]]),
'-': ([
'diaper:wheelchair'])
}})
# *[diaper:fee=yes]
# *[diaper:fee=no]
if ('diaper:fee' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:fee') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:fee') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table:fee=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper:fee => changing_table:fee"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 2008573526, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:fee', mapcss.tag(tags, 'diaper:fee')]]),
'-': ([
'diaper:fee'])
}})
# *[/^diaper:/]!.diaper___checked
if True:
match = False
if not match:
capture_tags = {}
try: match = (not set_diaper___checked and mapcss._tag_capture(capture_tags, 0, tags, self.re_6029fe03))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","diaper:*")
# suggestAlternative:"changing_table:*"
err.append({'class': 9002001, 'subclass': 26578864, 'text': mapcss.tr('{0} is deprecated', 'diaper:*')})
# *[changing_table][changing_table!~/^(yes|no|limited)$/]
if ('changing_table' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'changing_table') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_787405b1, '^(yes|no|limited)$'), mapcss._tag_capture(capture_tags, 1, tags, 'changing_table')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("wrong value: {0}","{0.tag}")
# suggestAlternative:"changing_table=limited"
# suggestAlternative:"changing_table=no"
# suggestAlternative:"changing_table=yes"
err.append({'class': 9002019, 'subclass': 1965225408, 'text': mapcss.tr('wrong value: {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[roof:shape=half_hipped]
if ('roof:shape' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'roof:shape') == mapcss._value_capture(capture_tags, 0, 'half_hipped'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"roof:shape=half-hipped"
# fixAdd:"roof:shape=half-hipped"
err.append({'class': 9002001, 'subclass': 1548347123, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['roof:shape','half-hipped']])
}})
# *[bridge_name]
if ('bridge_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge_name'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"bridge:name"
# fixChangeKey:"bridge_name => bridge:name"
err.append({'class': 9002001, 'subclass': 80069399, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:name', mapcss.tag(tags, 'bridge_name')]]),
'-': ([
'bridge_name'])
}})
# *[access=public]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'public'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"access=yes"
# fixAdd:"access=yes"
err.append({'class': 9002001, 'subclass': 1115157097, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['access','yes']])
}})
# *[crossing=island]
if ('crossing' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'crossing') == mapcss._value_capture(capture_tags, 0, 'island'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"crossing:island=yes"
# fixRemove:"crossing"
# fixAdd:"crossing:island=yes"
err.append({'class': 9002001, 'subclass': 1512561318, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['crossing:island','yes']]),
'-': ([
'crossing'])
}})
# *[recycling:metal]
if ('recycling:metal' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'recycling:metal'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"recycling:scrap_metal"
# fixChangeKey:"recycling:metal => recycling:scrap_metal"
err.append({'class': 9002001, 'subclass': 474491272, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['recycling:scrap_metal', mapcss.tag(tags, 'recycling:metal')]]),
'-': ([
'recycling:metal'])
}})
# *[shop=dog_grooming]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'dog_grooming'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=pet_grooming"
# fixAdd:"shop=pet_grooming"
err.append({'class': 9002001, 'subclass': 1073412885, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','pet_grooming']])
}})
# *[tower:type=anchor]
# *[tower:type=suspension]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'anchor'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'suspension'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# suggestAlternative:concat("line_attachment=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"tower:type => line_attachment"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 180380605, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_attachment', mapcss.tag(tags, 'tower:type')]]),
'-': ([
'tower:type'])
}})
# *[tower:type=branch][branch:type=split]
# *[tower:type=branch][branch:type=loop]
if ('branch:type' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'split'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'loop'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=split"
# fixRemove:"branch:type"
# fixAdd:"line_management=split"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 362350862, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','split']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=branch][!branch:type]
# *[tower:type=branch][branch:type=tap]
if ('branch:type' in keys and 'tower:type' in keys) or ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and not mapcss._tag_capture(capture_tags, 1, tags, 'branch:type'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'tap'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=branch"
# fixRemove:"branch:type"
# fixAdd:"line_management=branch"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 476423517, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','branch']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=branch][branch:type=cross]
if ('branch:type' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'cross'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=cross"
# fixRemove:"branch:type"
# fixAdd:"line_management=cross"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 2103059531, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','cross']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=termination]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'termination'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=termination"
# fixAdd:"line_management=termination"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 232235847, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','termination']]),
'-': ([
'tower:type'])
}})
# *[tower:type=transition]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'transition'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location:transition=yes"
# fixAdd:"location:transition=yes"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1124904944, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location:transition','yes']]),
'-': ([
'tower:type'])
}})
# *[tower:type=transposing]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'transposing'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=transpose"
# fixAdd:"line_management=transpose"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1795169098, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','transpose']]),
'-': ([
'tower:type'])
}})
# *[tower:type=crossing]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'crossing'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"height=* + design=*"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1301565974, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[tower:type][power][power=~/^(tower|pole|insulator|portal|terminal)$/]!.power_tower_type_warning
if ('power' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_power_tower_type_warning and mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') and mapcss._tag_capture(capture_tags, 1, tags, 'power') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 2, self.re_24dfeb95), mapcss._tag_capture(capture_tags, 2, tags, 'power')))
except mapcss.RuleAbort: pass
if match:
# setgeneric_power_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"design"
# suggestAlternative:"line_attachment"
# suggestAlternative:"line_management"
# suggestAlternative:"structure"
set_generic_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 2020421267, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}'))})
# *[pole:type][power][power=~/^(tower|pole|insulator|portal|terminal)$/]!.power_pole_type_warning!.generic_power_tower_type_warning
if ('pole:type' in keys and 'power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_power_pole_type_warning and not set_generic_power_tower_type_warning and mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') and mapcss._tag_capture(capture_tags, 1, tags, 'power') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 2, self.re_24dfeb95), mapcss._tag_capture(capture_tags, 2, tags, 'power')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"line_attachment"
# suggestAlternative:"line_management"
err.append({'class': 9002001, 'subclass': 1513543887, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}'))})
# way[barrier=embankment]
if ('barrier' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'embankment'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"embankment=yes"
# suggestAlternative:"man_made=embankment"
err.append({'class': 9002001, 'subclass': 2131554464, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# way[landuse=churchyard]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'churchyard'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=graveyard"
# suggestAlternative:"landuse=religious"
err.append({'class': 9002001, 'subclass': 1973571425, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sloped_curb=yes][!kerb]
# *[sloped_curb=both][!kerb]
if ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'yes') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'both') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"kerb=lowered"
# fixAdd:"kerb=lowered"
# fixRemove:"sloped_curb"
err.append({'class': 9002001, 'subclass': 1906002413, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['kerb','lowered']]),
'-': ([
'sloped_curb'])
}})
# *[sloped_curb=no][!kerb]
if ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'no') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"kerb=yes"
# fixAdd:"kerb=yes"
# fixRemove:"sloped_curb"
err.append({'class': 9002001, 'subclass': 893727015, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['kerb','yes']]),
'-': ([
'sloped_curb'])
}})
# *[sloped_curb][sloped_curb!~/^(yes|both|no)$/][!kerb]
# *[sloped_curb][kerb]
if ('kerb' in keys and 'sloped_curb' in keys) or ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_01eb1711, '^(yes|both|no)$'), mapcss._tag_capture(capture_tags, 1, tags, 'sloped_curb')) and not mapcss._tag_capture(capture_tags, 2, tags, 'kerb'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') and mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"kerb=*"
err.append({'class': 9002001, 'subclass': 1682376745, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[unnamed=yes]
if ('unnamed' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'unnamed') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"noname=yes"
# fixChangeKey:"unnamed => noname"
err.append({'class': 9002001, 'subclass': 1901447020, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['noname', mapcss.tag(tags, 'unnamed')]]),
'-': ([
'unnamed'])
}})
# way[segregated][segregated!=yes][segregated!=no]
if ('segregated' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'segregated') and mapcss._tag_capture(capture_tags, 1, tags, 'segregated') != mapcss._value_const_capture(capture_tags, 1, 'yes', 'yes') and mapcss._tag_capture(capture_tags, 2, tags, 'segregated') != mapcss._value_const_capture(capture_tags, 2, 'no', 'no'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("unusual value of {0}","{0.key}")
err.append({'class': 9002020, 'subclass': 1585094150, 'text': mapcss.tr('unusual value of {0}', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# way[bicycle:oneway]
if ('bicycle:oneway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bicycle:oneway'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"oneway:bicycle"
# fixChangeKey:"bicycle:oneway => oneway:bicycle"
err.append({'class': 9002001, 'subclass': 919622980, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['oneway:bicycle', mapcss.tag(tags, 'bicycle:oneway')]]),
'-': ([
'bicycle:oneway'])
}})
# *[building:height]
if ('building:height' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:height'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"height"
# fixChangeKey:"building:height => height"
err.append({'class': 9002001, 'subclass': 1328174745, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['height', mapcss.tag(tags, 'building:height')]]),
'-': ([
'building:height'])
}})
# *[building:min_height]
if ('building:min_height' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:min_height'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"min_height"
# fixChangeKey:"building:min_height => min_height"
err.append({'class': 9002001, 'subclass': 1042683921, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['min_height', mapcss.tag(tags, 'building:min_height')]]),
'-': ([
'building:min_height'])
}})
# way[highway][construction=yes][highway!=construction]
if ('construction' in keys and 'highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') and mapcss._tag_capture(capture_tags, 1, tags, 'construction') == mapcss._value_capture(capture_tags, 1, 'yes') and mapcss._tag_capture(capture_tags, 2, tags, 'highway') != mapcss._value_const_capture(capture_tags, 2, 'construction', 'construction'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# suggestAlternative:concat("highway=construction + construction=","{0.value}")
# throwWarning:tr("{0} is deprecated","{1.tag}")
# suggestAlternative:"construction=minor"
err.append({'class': 9002001, 'subclass': 585996498, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.tag}'))})
# *[car][amenity=charging_station]
if ('amenity' in keys and 'car' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'car') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') == mapcss._value_capture(capture_tags, 1, 'charging_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"motorcar"
# fixChangeKey:"car => motorcar"
err.append({'class': 9002001, 'subclass': 1165117414, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['motorcar', mapcss.tag(tags, 'car')]]),
'-': ([
'car'])
}})
# *[navigationaid=approach_light]
# *[navigationaid="ALS (Approach lighting system)"]
if ('navigationaid' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'navigationaid') == mapcss._value_capture(capture_tags, 0, 'approach_light'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'navigationaid') == mapcss._value_capture(capture_tags, 0, 'ALS (Approach lighting system)'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"navigationaid=als"
# fixAdd:"navigationaid=als"
err.append({'class': 9002001, 'subclass': 1577817081, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['navigationaid','als']])
}})
# *[water=riverbank][!natural]
if ('water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'riverbank') and not mapcss._tag_capture(capture_tags, 1, tags, 'natural'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=water + water=river"
# fixAdd:"natural=water"
# fixAdd:"water=river"
err.append({'class': 9002001, 'subclass': 186872153, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','water'],
['water','river']])
}})
# *[water=riverbank][natural]
if ('natural' in keys and 'water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'riverbank') and mapcss._tag_capture(capture_tags, 1, tags, 'natural'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=water + water=river"
err.append({'class': 9002001, 'subclass': 630806094, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# way[amenity=bench][capacity][!seats]
if ('amenity' in keys and 'capacity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'bench') and mapcss._tag_capture(capture_tags, 1, tags, 'capacity') and not mapcss._tag_capture(capture_tags, 2, tags, 'seats'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"seats"
# fixChangeKey:"capacity => seats"
err.append({'class': 9002001, 'subclass': 1511456494, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['seats', mapcss.tag(tags, 'capacity')]]),
'-': ([
'capacity'])
}})
# way[amenity=bench][capacity][seats]
if ('amenity' in keys and 'capacity' in keys and 'seats' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'bench') and mapcss._tag_capture(capture_tags, 1, tags, 'capacity') and mapcss._tag_capture(capture_tags, 2, tags, 'seats'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"seats"
err.append({'class': 9002001, 'subclass': 1445114632, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# way[stream=intermittent]
if ('stream' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'stream') == mapcss._value_capture(capture_tags, 0, 'intermittent'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"intermittent=yes"
# suggestAlternative:"seasonal=yes"
err.append({'class': 9002001, 'subclass': 1710194213, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=lamps]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'lamps'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=lighting"
# fixAdd:"shop=lighting"
err.append({'class': 9002001, 'subclass': 746886011, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','lighting']])
}})
# *[access=customer]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'customer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"access=customers"
# fixAdd:"access=customers"
err.append({'class': 9002001, 'subclass': 1040065637, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['access','customers']])
}})
# *[addr:inclusion=estimated]
if ('addr:inclusion' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'addr:inclusion') == mapcss._value_capture(capture_tags, 0, 'estimated'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"addr:inclusion=estimate"
# fixAdd:"addr:inclusion=estimate"
err.append({'class': 9002001, 'subclass': 1002643753, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['addr:inclusion','estimate']])
}})
# *[building=apartment]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'apartment'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"building=apartments"
# fixAdd:"building=apartments"
err.append({'class': 9002001, 'subclass': 1384168519, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','apartments']])
}})
# *[generator:type=solar_photovoltaic_panels]
if ('generator:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:type') == mapcss._value_capture(capture_tags, 0, 'solar_photovoltaic_panels'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:type=solar_photovoltaic_panel"
# fixAdd:"generator:type=solar_photovoltaic_panel"
err.append({'class': 9002001, 'subclass': 1146719875, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:type','solar_photovoltaic_panel']])
}})
# *[building=part]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'part'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"building:part=yes"
err.append({'class': 9002001, 'subclass': 455695847, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=sink_hole]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'sink_hole'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=sinkhole"
# fixAdd:"natural=sinkhole"
err.append({'class': 9002001, 'subclass': 1283355945, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','sinkhole']])
}})
# *[climbing:grade:UIAA:min]
if ('climbing:grade:UIAA:min' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:min'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:min"
# fixChangeKey:"climbing:grade:UIAA:min => climbing:grade:uiaa:min"
err.append({'class': 9002001, 'subclass': 1408052420, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:min', mapcss.tag(tags, 'climbing:grade:UIAA:min')]]),
'-': ([
'climbing:grade:UIAA:min'])
}})
# *[climbing:grade:UIAA:max]
if ('climbing:grade:UIAA:max' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:max'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:max"
# fixChangeKey:"climbing:grade:UIAA:max => climbing:grade:uiaa:max"
err.append({'class': 9002001, 'subclass': 1866245426, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:max', mapcss.tag(tags, 'climbing:grade:UIAA:max')]]),
'-': ([
'climbing:grade:UIAA:max'])
}})
# *[climbing:grade:UIAA:mean]
if ('climbing:grade:UIAA:mean' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:mean'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:mean"
# fixChangeKey:"climbing:grade:UIAA:mean => climbing:grade:uiaa:mean"
err.append({'class': 9002001, 'subclass': 1022648087, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:mean', mapcss.tag(tags, 'climbing:grade:UIAA:mean')]]),
'-': ([
'climbing:grade:UIAA:mean'])
}})
# *[climbing:grade:UIAA]
if ('climbing:grade:UIAA' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa"
# fixChangeKey:"climbing:grade:UIAA => climbing:grade:uiaa"
err.append({'class': 9002001, 'subclass': 1007893519, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa', mapcss.tag(tags, 'climbing:grade:UIAA')]]),
'-': ([
'climbing:grade:UIAA'])
}})
# *[cuisine][cuisine=~/^(?i)(bbq)$/]
if ('cuisine' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_2f881233), mapcss._tag_capture(capture_tags, 1, tags, 'cuisine')))
except mapcss.RuleAbort: pass
if match:
# setbbq_autofix
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"cuisine=barbecue"
# fixAdd:"cuisine=barbecue"
# assertMatch:"way cuisine=BBQ"
# assertMatch:"way cuisine=bbq"
# assertNoMatch:"way cuisine=bbq;pizza"
# assertNoMatch:"way cuisine=korean_bbq"
# assertNoMatch:"way cuisine=korean_bbq;bbq"
# assertNoMatch:"way cuisine=pasta;bbq;pizza"
# assertNoMatch:"way cuisine=pizza;Bbq"
# assertNoMatch:"way cuisine=pizza;bbq"
set_bbq_autofix = True
err.append({'class': 9002001, 'subclass': 1943338875, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['cuisine','barbecue']])
}})
# *[cuisine=~/(?i)(;bbq|bbq;)/][cuisine!~/(?i)(_bbq)/]
if ('cuisine' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss.regexp_test(mapcss._value_capture(capture_tags, 0, self.re_340a2b31), mapcss._tag_capture(capture_tags, 0, tags, 'cuisine')) and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_7d409ed5, '(?i)(_bbq)'), mapcss._tag_capture(capture_tags, 1, tags, 'cuisine')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","cuisine=bbq")
# suggestAlternative:"cuisine=barbecue"
# assertNoMatch:"way cuisine=BBQ"
# assertNoMatch:"way cuisine=bbq"
# assertMatch:"way cuisine=bbq;pizza"
# assertNoMatch:"way cuisine=korean_bbq"
# assertNoMatch:"way cuisine=korean_bbq;bbq"
# assertMatch:"way cuisine=pasta;bbq;pizza"
# assertMatch:"way cuisine=pizza;Bbq"
# assertMatch:"way cuisine=pizza;bbq"
err.append({'class': 9002001, 'subclass': 1958782130, 'text': mapcss.tr('{0} is deprecated', 'cuisine=bbq')})
# way[cycleway=none]
# way[cycleway:left=none]
# way[cycleway:right=none]
# way[shoulder=none]
if ('cycleway' in keys) or ('cycleway:left' in keys) or ('cycleway:right' in keys) or ('shoulder' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cycleway') == mapcss._value_capture(capture_tags, 0, 'none'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cycleway:left') == mapcss._value_capture(capture_tags, 0, 'none'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cycleway:right') == mapcss._value_capture(capture_tags, 0, 'none'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shoulder') == mapcss._value_capture(capture_tags, 0, 'none'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# suggestAlternative:concat("{0.key}","=no")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixAdd:concat("{0.key}","=no")
err.append({'class': 9002001, 'subclass': 1752530337, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
(mapcss.concat(mapcss._tag_uncapture(capture_tags, '{0.key}'), '=no')).split('=', 1)])
}})
# *[Fixme]
if ('Fixme' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'Fixme'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"fixme"
# fixChangeKey:"Fixme => fixme"
# assertNoMatch:"way FIXME=foo"
# assertMatch:"way Fixme=foo"
# assertNoMatch:"way fixme=foo"
err.append({'class': 9002001, 'subclass': 592643943, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['fixme', mapcss.tag(tags, 'Fixme')]]),
'-': ([
'Fixme'])
}})
# *[amenity=embassy]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'embassy'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=diplomatic + diplomatic=embassy"
# fixChangeKey:"amenity => diplomatic"
# fixAdd:"office=diplomatic"
err.append({'class': 9002001, 'subclass': 1751915206, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['diplomatic', mapcss.tag(tags, 'amenity')],
['office','diplomatic']]),
'-': ([
'amenity'])
}})
return err
def relation(self, data, tags, members):
capture_tags = {}
keys = tags.keys()
err = []
set_bbq_autofix = set_diaper___checked = set_diaper_checked = set_generic_power_tower_type_warning = set_power_pole_type_warning = set_power_tower_type_warning = set_samecolor = False
# *[barrier=wire_fence]
if ('barrier' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wire_fence'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=fence + fence_type=chain_link"
# fixAdd:"barrier=fence"
# fixAdd:"fence_type=chain_link"
err.append({'class': 9002001, 'subclass': 1107799632, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','fence'],
['fence_type','chain_link']])
}})
# *[barrier=wood_fence]
if ('barrier' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'wood_fence'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=fence + fence_type=wood"
# fixAdd:"barrier=fence"
# fixAdd:"fence_type=wood"
err.append({'class': 9002001, 'subclass': 1412230714, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','fence'],
['fence_type','wood']])
}})
# *[highway=stile]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'stile'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=stile"
# fixAdd:"barrier=stile"
# fixRemove:"highway"
err.append({'class': 9002001, 'subclass': 1435678043, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['barrier','stile']]),
'-': ([
'highway'])
}})
# *[highway=incline]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'incline'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"incline"
err.append({'class': 9002001, 'subclass': 765169083, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[highway=incline_steep]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'incline_steep'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"incline"
err.append({'class': 9002001, 'subclass': 1966772390, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[highway=unsurfaced]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'unsurfaced'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"highway=* + surface=unpaved"
# fixAdd:"highway=road"
# fixAdd:"surface=unpaved"
err.append({'class': 9002001, 'subclass': 20631498, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['highway','road'],
['surface','unpaved']])
}})
# *[landuse=wood]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'wood'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=forest"
# suggestAlternative:"natural=wood"
err.append({'class': 9002001, 'subclass': 469903103, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=marsh]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'marsh'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=wetland + wetland=marsh"
# fixAdd:"natural=wetland"
# fixAdd:"wetland=marsh"
err.append({'class': 9002001, 'subclass': 1459865523, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','wetland'],
['wetland','marsh']])
}})
# *[highway=byway]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'byway'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
err.append({'class': 9002001, 'subclass': 1844620979, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[power_source]
if ('power_source' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power_source'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"generator:source"
err.append({'class': 9002001, 'subclass': 34751027, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[power_rating]
if ('power_rating' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power_rating'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"generator:output"
err.append({'class': 9002001, 'subclass': 904750343, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[shop=antique]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'antique'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=antiques"
# fixAdd:"shop=antiques"
err.append({'class': 9002001, 'subclass': 596668979, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','antiques']])
}})
# *[shop=bags]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'bags'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=bag"
# fixAdd:"shop=bag"
err.append({'class': 9002001, 'subclass': 1709003584, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','bag']])
}})
# *[shop=fashion]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fashion'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=clothes"
# fixAdd:"shop=clothes"
err.append({'class': 9002001, 'subclass': 985619804, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','clothes']])
}})
# *[shop=organic]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'organic'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=* + organic=only"
# suggestAlternative:"shop=* + organic=yes"
err.append({'class': 9002001, 'subclass': 1959365145, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=pets]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'pets'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=pet"
# fixAdd:"shop=pet"
err.append({'class': 9002001, 'subclass': 290270098, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','pet']])
}})
# *[shop=pharmacy]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'pharmacy'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=pharmacy"
# fixChangeKey:"shop => amenity"
err.append({'class': 9002001, 'subclass': 350722657, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'shop')]]),
'-': ([
'shop'])
}})
# *[bicycle_parking=sheffield]
if ('bicycle_parking' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bicycle_parking') == mapcss._value_capture(capture_tags, 0, 'sheffield'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bicycle_parking=stands"
# fixAdd:"bicycle_parking=stands"
err.append({'class': 9002001, 'subclass': 718874663, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bicycle_parking','stands']])
}})
# *[amenity=emergency_phone]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'emergency_phone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"emergency=phone"
# fixRemove:"amenity"
# fixAdd:"emergency=phone"
err.append({'class': 9002001, 'subclass': 1108230656, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency','phone']]),
'-': ([
'amenity'])
}})
# *[sport=gaelic_football]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'gaelic_football'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=gaelic_games"
# fixAdd:"sport=gaelic_games"
err.append({'class': 9002001, 'subclass': 1768681881, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['sport','gaelic_games']])
}})
# *[power=station]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"power=plant"
# suggestAlternative:"power=substation"
err.append({'class': 9002001, 'subclass': 52025933, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[power=sub_station]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'sub_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"power=substation"
# fixAdd:"power=substation"
err.append({'class': 9002001, 'subclass': 1423074682, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['power','substation']])
}})
# *[location=rooftop]
if ('location' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'location') == mapcss._value_capture(capture_tags, 0, 'rooftop'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location=roof"
# fixAdd:"location=roof"
err.append({'class': 9002001, 'subclass': 1028577225, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location','roof']])
}})
        # *[generator:location]
        # Deprecation rule (appears machine-generated from the MapCSS selector
        # above — do not hand-edit; change the MapCSS source instead).
        # Matches any object carrying a 'generator:location' key and reports it
        # as deprecated, offering an automatic key rename to 'location'.
        if ('generator:location' in keys):
            match = False
            if not match:
                capture_tags = {}
                # _tag_capture records the matched key/value for later {0.key}
                # substitution; RuleAbort means "selector does not apply".
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:location'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"location"
                # fixChangeKey:"generator:location => location"
                # The 'fix' dict implements fixChangeKey as add('+') + remove('-'):
                # copy the old value under the new key, then drop the old key.
                err.append({'class': 9002001, 'subclass': 900615917, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['location', mapcss.tag(tags, 'generator:location')]]),
                    '-': ([
                        'generator:location'])
                }})
# *[generator:method=dam]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'dam'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-storage"
# fixAdd:"generator:method=water-storage"
err.append({'class': 9002001, 'subclass': 248819368, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-storage']])
}})
# *[generator:method=pumped-storage]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'pumped-storage'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-pumped-storage"
# fixAdd:"generator:method=water-pumped-storage"
err.append({'class': 9002001, 'subclass': 93454158, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-pumped-storage']])
}})
# *[generator:method=pumping]
if ('generator:method' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:method') == mapcss._value_capture(capture_tags, 0, 'pumping'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:method=water-pumped-storage"
# fixAdd:"generator:method=water-pumped-storage"
err.append({'class': 9002001, 'subclass': 2115673716, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:method','water-pumped-storage']])
}})
# *[fence_type=chain]
if ('fence_type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fence_type') == mapcss._value_capture(capture_tags, 0, 'chain'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"barrier=chain"
# suggestAlternative:"barrier=fence + fence_type=chain_link"
err.append({'class': 9002001, 'subclass': 19409288, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[building=entrance]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'entrance'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"entrance"
err.append({'class': 9002001, 'subclass': 306662985, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[board_type=board]
if ('board_type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'board_type') == mapcss._value_capture(capture_tags, 0, 'board'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixRemove:"board_type"
err.append({'class': 9002001, 'subclass': 1150949316, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'board_type'])
}})
# *[man_made=measurement_station]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'measurement_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=monitoring_station"
# fixAdd:"man_made=monitoring_station"
err.append({'class': 9002001, 'subclass': 700465123, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','monitoring_station']])
}})
# *[measurement=water_level]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'water_level'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:water_level=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:water_level=yes"
err.append({'class': 9002001, 'subclass': 634647702, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:water_level','yes']]),
'-': ([
'measurement'])
}})
# *[measurement=weather]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'weather'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:weather=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:weather=yes"
err.append({'class': 9002001, 'subclass': 336627227, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:weather','yes']]),
'-': ([
'measurement'])
}})
# *[measurement=seismic]
if ('measurement' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'measurement') == mapcss._value_capture(capture_tags, 0, 'seismic'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"monitoring:seismic_activity=yes"
# fixRemove:"measurement"
# fixAdd:"monitoring:seismic_activity=yes"
err.append({'class': 9002001, 'subclass': 1402131289, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:seismic_activity','yes']]),
'-': ([
'measurement'])
}})
# *[monitoring:river_level]
if ('monitoring:river_level' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'monitoring:river_level'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"monitoring:water_level"
# fixChangeKey:"monitoring:river_level => monitoring:water_level"
err.append({'class': 9002001, 'subclass': 264907924, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['monitoring:water_level', mapcss.tag(tags, 'monitoring:river_level')]]),
'-': ([
'monitoring:river_level'])
}})
# *[stay]
if ('stay' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'stay'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"maxstay"
# fixChangeKey:"stay => maxstay"
err.append({'class': 9002001, 'subclass': 787370129, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['maxstay', mapcss.tag(tags, 'stay')]]),
'-': ([
'stay'])
}})
# *[emergency=aed]
if ('emergency' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'emergency') == mapcss._value_capture(capture_tags, 0, 'aed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"emergency=defibrillator"
# fixAdd:"emergency=defibrillator"
err.append({'class': 9002001, 'subclass': 707111885, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['emergency','defibrillator']])
}})
        # *[day_on][!restriction]
        # *[day_off][!restriction]
        # *[date_on][!restriction]
        # *[date_off][!restriction]
        # *[hour_on][!restriction]
        # *[hour_off][!restriction]
        # Six MapCSS selectors compiled into one check: the outer 'in keys'
        # test is a cheap prescreen over every literal key any selector uses;
        # the alternatives below are then tried in order until one matches
        # (each resets capture_tags so {0.key} reflects the winning selector).
        if ('date_off' in keys) or ('date_on' in keys) or ('day_off' in keys) or ('day_on' in keys) or ('hour_off' in keys) or ('hour_on' in keys):
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'day_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'day_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'date_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'date_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hour_on') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
                except mapcss.RuleAbort: pass
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hour_off') and not mapcss._tag_capture(capture_tags, 1, tags, 'restriction'))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwWarning:tr("{0} is deprecated","{0.key}")
                # suggestAlternative:"*:conditional"
                # Warning only — no 'fix' entry, since rewriting these keys into
                # opening_hours-style ':conditional' syntax cannot be automated.
                err.append({'class': 9002001, 'subclass': 294264920, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[access=designated]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'designated'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("''{0}'' is meaningless, use more specific tags, e.g. ''{1}''","access=designated","bicycle=designated")
err.append({'class': 9002002, 'subclass': 2057594338, 'text': mapcss.tr('\'\'{0}\'\' is meaningless, use more specific tags, e.g. \'\'{1}\'\'', 'access=designated', 'bicycle=designated')})
# *[access=official]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'official'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("''{0}'' does not specify the official mode of transportation, use ''{1}'' for example","access=official","bicycle=official")
err.append({'class': 9002003, 'subclass': 1909133836, 'text': mapcss.tr('\'\'{0}\'\' does not specify the official mode of transportation, use \'\'{1}\'\' for example', 'access=official', 'bicycle=official')})
# *[fixme=yes]
# *[FIXME=yes]
if ('FIXME' in keys) or ('fixme' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fixme') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'FIXME') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0}={1} is unspecific. Instead of ''{1}'' please give more information about what exactly should be fixed.","{0.key}","{0.value}")
err.append({'class': 9002004, 'subclass': 136657482, 'text': mapcss.tr('{0}={1} is unspecific. Instead of \'\'{1}\'\' please give more information about what exactly should be fixed.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
# *[name][name=~/^(?i)fixme$/]
if ('name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'name') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_1f92073a), mapcss._tag_capture(capture_tags, 1, tags, 'name')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("Wrong usage of {0} tag. Remove {1}, because it is clear that the name is missing even without an additional tag.","{0.key}","{0.tag}")
# fixRemove:"name"
err.append({'class': 9002005, 'subclass': 642340557, 'text': mapcss.tr('Wrong usage of {0} tag. Remove {1}, because it is clear that the name is missing even without an additional tag.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'name'])
}})
# *[note][note=~/^(?i)fixme$/]
if ('note' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'note') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_1f92073a), mapcss._tag_capture(capture_tags, 1, tags, 'note')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is unspecific. Instead use the key fixme with the information what exactly should be fixed in the value of fixme.","{0.tag}")
err.append({'class': 9002006, 'subclass': 1243120287, 'text': mapcss.tr('{0} is unspecific. Instead use the key fixme with the information what exactly should be fixed in the value of fixme.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[type=broad_leaved]
# *[type=broad_leafed]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'broad_leaved'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'broad_leafed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=broadleaved"
# fixAdd:"leaf_type=broadleaved"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 293968062, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','broadleaved']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[wood=coniferous]
# *[type=coniferous]
# *[type=conifer]
if ('type' in keys) or ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'coniferous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'coniferous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'conifer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=needleleaved"
# fixAdd:"leaf_type=needleleaved"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 50517650, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','needleleaved']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[wood=mixed]
if ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'mixed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type=mixed"
# fixAdd:"leaf_type=mixed"
# fixRemove:"wood"
err.append({'class': 9002001, 'subclass': 235914603, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_type','mixed']]),
'-': ([
'wood'])
}})
# *[wood=evergreen]
# *[type=evergreen]
if ('type' in keys) or ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'evergreen'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'evergreen'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_cycle=evergreen"
# fixAdd:"leaf_cycle=evergreen"
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 747964532, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_cycle','evergreen']]),
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[type=deciduous]
# *[type=deciduos]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'deciduous'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'deciduos'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_cycle=deciduous"
# fixAdd:"leaf_cycle=deciduous"
# fixRemove:"type"
err.append({'class': 9002001, 'subclass': 591116099, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leaf_cycle','deciduous']]),
'-': ([
'type'])
}})
# *[wood=deciduous]
if ('wood' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'wood') == mapcss._value_capture(capture_tags, 0, 'deciduous'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leaf_type + leaf_cycle"
err.append({'class': 9002001, 'subclass': 1100223594, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=land]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'land'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Please use instead a multipolygon.","{0.tag}")
err.append({'class': 9002001, 'subclass': 94558529, 'text': mapcss.tr('{0} is deprecated. Please use instead a multipolygon.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=causeway]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'causeway'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=low_water_crossing"
# suggestAlternative:"embankment=yes"
# suggestAlternative:"ford=yes"
err.append({'class': 9002001, 'subclass': 461671124, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=swing]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'swing'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge:movable=swing"
# suggestAlternative:"bridge:structure=simple-suspension"
err.append({'class': 9002001, 'subclass': 1047428067, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[bridge=suspension]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'suspension'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=yes + bridge:structure=suspension"
# fixAdd:"bridge:structure=suspension"
# fixAdd:"bridge=yes"
err.append({'class': 9002001, 'subclass': 1157046268, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:structure','suspension'],
['bridge','yes']])
}})
# *[bridge=pontoon]
if ('bridge' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge') == mapcss._value_capture(capture_tags, 0, 'pontoon'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"bridge=yes + bridge:structure=floating"
# fixAdd:"bridge:structure=floating"
# fixAdd:"bridge=yes"
err.append({'class': 9002001, 'subclass': 1195531951, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:structure','floating'],
['bridge','yes']])
}})
# *[fee=interval]
# *[lit=interval]
# *[supervised=interval]
if ('fee' in keys) or ('lit' in keys) or ('supervised' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fee') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lit') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'supervised') == mapcss._value_capture(capture_tags, 0, 'interval'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Please specify interval by using opening_hours syntax","{0.tag}")
err.append({'class': 9002001, 'subclass': 417886592, 'text': mapcss.tr('{0} is deprecated. Please specify interval by using opening_hours syntax', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
        # *[/josm\/ignore/]
        # This selector matches KEYS by regular expression (self.re_5ee0acf2,
        # compiled elsewhere in this class), so unlike the literal-key rules
        # there is no 'x in keys' prescreen possible — hence 'if True:'.
        if True:
            match = False
            if not match:
                capture_tags = {}
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_5ee0acf2))
                except mapcss.RuleAbort: pass
            if match:
                # group:tr("deprecated tagging")
                # throwError:tr("{0} is deprecated. Please delete this object and use a private layer instead","{0.key}")
                # fixDeleteObject:this
                # NOTE(review): the fixDeleteObject directive is not translated
                # into a 'fix' entry here — presumably object deletion is not
                # expressible in this err format; confirm against the generator.
                err.append({'class': 9002001, 'subclass': 1402743016, 'text': mapcss.tr('{0} is deprecated. Please delete this object and use a private layer instead', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[sport=diving]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'diving'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=cliff_diving"
# suggestAlternative:"sport=scuba_diving"
err.append({'class': 9002001, 'subclass': 590643159, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[parking=park_and_ride]
if ('parking' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'parking') == mapcss._value_capture(capture_tags, 0, 'park_and_ride'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=parking + park_ride=yes"
# fixAdd:"amenity=parking"
# fixAdd:"park_ride=yes"
# fixRemove:"parking"
err.append({'class': 9002001, 'subclass': 1893516041, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','parking'],
['park_ride','yes']]),
'-': ([
'parking'])
}})
# *[playground=yes]
# *[manhole=plain]
# *[manhole=unknown]
# *[manhole=yes]
# *[police=yes]
# *[traffic_calming=yes]
# *[access=restricted]
# *[barrier=yes]
# *[aerialway=yes][!public_transport]
# *[amenity=yes]
# *[leisure=yes]
# *[shop="*"]
# *[shop=yes][amenity!=fuel]
# *[craft=yes]
# *[service=yes]
# *[place=yes]
if ('access' in keys) or ('aerialway' in keys) or ('amenity' in keys) or ('barrier' in keys) or ('craft' in keys) or ('leisure' in keys) or ('manhole' in keys) or ('place' in keys) or ('playground' in keys) or ('police' in keys) or ('service' in keys) or ('shop' in keys) or ('traffic_calming' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'playground') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'plain'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'manhole') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'police') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'traffic_calming') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'restricted'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'barrier') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'aerialway') == mapcss._value_capture(capture_tags, 0, 'yes') and not mapcss._tag_capture(capture_tags, 1, tags, 'public_transport'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, '*'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'yes') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') != mapcss._value_const_capture(capture_tags, 1, 'fuel', 'fuel'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'craft') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'service') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0}={1} is unspecific. Please replace ''{1}'' by a specific value.","{0.key}","{0.value}")
err.append({'class': 9002007, 'subclass': 727505823, 'text': mapcss.tr('{0}={1} is unspecific. Please replace \'\'{1}\'\' by a specific value.', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
        # *[place_name][!name]
        # Matches objects that have 'place_name' but no 'name' tag, and offers
        # an automatic rename of the key (value is preserved).
        if ('place_name' in keys):
            match = False
            if not match:
                capture_tags = {}
                # Two captures: 0 = present 'place_name', 1 = absent 'name'
                # (negated), feeding the {0.key}/{1.key} message placeholders.
                try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place_name') and not mapcss._tag_capture(capture_tags, 1, tags, 'name'))
                except mapcss.RuleAbort: pass
            if match:
                # throwWarning:tr("{0} should be replaced with {1}","{0.key}","{1.key}")
                # fixChangeKey:"place_name => name"
                # fixChangeKey compiles to '+': add new key with old value,
                # '-': remove the old key.
                err.append({'class': 9002008, 'subclass': 1089331760, 'text': mapcss.tr('{0} should be replaced with {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
                    '+': dict([
                        ['name', mapcss.tag(tags, 'place_name')]]),
                    '-': ([
                        'place_name'])
                }})
# *[place][place_name=*name]
if ('place' in keys and 'place_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'place') and mapcss._tag_capture(capture_tags, 1, tags, 'place_name') == mapcss._value_capture(capture_tags, 1, mapcss.tag(tags, 'name')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} = {1}; remove {0}","{1.key}","{1.value}")
# fixRemove:"{1.key}"
err.append({'class': 9002009, 'subclass': 1116761280, 'text': mapcss.tr('{0} = {1}; remove {0}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{1.value}')), 'allow_fix_override': True, 'fix': {
'-': ([
mapcss._tag_uncapture(capture_tags, '{1.key}')])
}})
# *[waterway=water_point]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'water_point'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=water_point"
# fixChangeKey:"waterway => amenity"
err.append({'class': 9002001, 'subclass': 103347605, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'waterway')]]),
'-': ([
'waterway'])
}})
# *[waterway=waste_disposal]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'waste_disposal'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=waste_disposal"
# fixChangeKey:"waterway => amenity"
err.append({'class': 9002001, 'subclass': 1963461348, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'waterway')]]),
'-': ([
'waterway'])
}})
# *[waterway=mooring]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'mooring'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"mooring=yes"
# fixAdd:"mooring=yes"
# fixRemove:"waterway"
err.append({'class': 9002001, 'subclass': 81358738, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['mooring','yes']]),
'-': ([
'waterway'])
}})
# *[building][levels]
# *[building:part=yes][levels]
if ('building' in keys and 'levels' in keys) or ('building:part' in keys and 'levels' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') and mapcss._tag_capture(capture_tags, 1, tags, 'levels'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:part') == mapcss._value_capture(capture_tags, 0, 'yes') and mapcss._tag_capture(capture_tags, 1, tags, 'levels'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.key}")
# suggestAlternative:"building:levels"
# fixChangeKey:"levels => building:levels"
err.append({'class': 9002001, 'subclass': 293177436, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building:levels', mapcss.tag(tags, 'levels')]]),
'-': ([
'levels'])
}})
# *[protected_class]
if ('protected_class' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'protected_class'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"protect_class"
# fixChangeKey:"protected_class => protect_class"
err.append({'class': 9002001, 'subclass': 716999373, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['protect_class', mapcss.tag(tags, 'protected_class')]]),
'-': ([
'protected_class'])
}})
# *[kerb=unknown]
# *[lock=unknown]
# *[hide=unknown]
# *[shelter=unknown]
# *[access=unknown]
# *[capacity:parent=unknown]
# *[capacity:women=unknown]
# *[capacity:disabled=unknown]
# *[crossing=unknown]
# *[foot=unknown]
if ('access' in keys) or ('capacity:disabled' in keys) or ('capacity:parent' in keys) or ('capacity:women' in keys) or ('crossing' in keys) or ('foot' in keys) or ('hide' in keys) or ('kerb' in keys) or ('lock' in keys) or ('shelter' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kerb') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'lock') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'hide') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shelter') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:parent') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:women') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'capacity:disabled') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'crossing') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'foot') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("Unspecific tag {0}","{0.tag}")
err.append({'class': 9002010, 'subclass': 1052866123, 'text': mapcss.tr('Unspecific tag {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sport=skiing]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'skiing'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("Definition of {0} is unclear","{0.tag}")
# suggestAlternative:tr("{0} + {1} + {2}","piste:type=*","piste:difficulty=*","piste:grooming=*")
err.append({'class': 9002001, 'subclass': 1578959559, 'text': mapcss.tr('Definition of {0} is unclear', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[waterway=wadi]
if ('waterway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'waterway') == mapcss._value_capture(capture_tags, 0, 'wadi'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=valley"
# suggestAlternative:"{0.key}=* + intermittent=yes"
err.append({'class': 9002001, 'subclass': 719234223, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[drinkable]
if ('drinkable' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'drinkable'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"drinking_water"
err.append({'class': 9002001, 'subclass': 1785584789, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[color][!colour]
if ('color' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'color') and not mapcss._tag_capture(capture_tags, 1, tags, 'colour'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"colour"
# fixChangeKey:"color => colour"
err.append({'class': 9002001, 'subclass': 1850270072, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['colour', mapcss.tag(tags, 'color')]]),
'-': ([
'color'])
}})
# *[color][colour][color=*colour]
if ('color' in keys and 'colour' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'color') and mapcss._tag_capture(capture_tags, 1, tags, 'colour') and mapcss._tag_capture(capture_tags, 2, tags, 'color') == mapcss._value_capture(capture_tags, 2, mapcss.tag(tags, 'colour')))
except mapcss.RuleAbort: pass
if match:
# setsamecolor
# group:tr("deprecated tagging")
# throwWarning:tr("{0} together with {1}","{0.key}","{1.key}")
# suggestAlternative:"colour"
# fixRemove:"color"
set_samecolor = True
err.append({'class': 9002001, 'subclass': 1825345743, 'text': mapcss.tr('{0} together with {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
'color'])
}})
# *[color][colour]!.samecolor
if ('color' in keys and 'colour' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_samecolor and mapcss._tag_capture(capture_tags, 0, tags, 'color') and mapcss._tag_capture(capture_tags, 1, tags, 'colour'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} together with {1} and conflicting values","{0.key}","{1.key}")
# suggestAlternative:"colour"
err.append({'class': 9002001, 'subclass': 1064658218, 'text': mapcss.tr('{0} together with {1} and conflicting values', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.key}'))})
# *[building:color][building:colour]!.samebuildingcolor
# Rule Blacklisted
# *[roof:color][roof:colour]!.sameroofcolor
# Rule Blacklisted
# *[/:color/][!building:color][!roof:color][!gpxd:color]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_554de4c7) and not mapcss._tag_capture(capture_tags, 1, tags, 'building:color') and not mapcss._tag_capture(capture_tags, 2, tags, 'roof:color') and not mapcss._tag_capture(capture_tags, 3, tags, 'gpxd:color'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:":colour"
err.append({'class': 9002001, 'subclass': 1632389707, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/color:/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_0c5b5730))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"colour:"
err.append({'class': 9002001, 'subclass': 1390370717, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/=|\+|\/|&|<|>|;|'|"|%|#|@|\\|,|\.|\{|\}|\?|\*|\^|\$/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_620f4d52))
except mapcss.RuleAbort: pass
if match:
# group:tr("key with uncommon character")
# throwWarning:tr("{0}","{0.key}")
err.append({'class': 9002011, 'subclass': 1752615188, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[/^.$/]
# relation[/^..$/][!to]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_27210286))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_34c15d62) and not mapcss._tag_capture(capture_tags, 1, tags, 'to'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("uncommon short key")
# assertMatch:"relation fo=bar"
# assertNoMatch:"relation to=Berlin"
err.append({'class': 9002012, 'subclass': 518970721, 'text': mapcss.tr('uncommon short key')})
# *[sport=hockey]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'hockey'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=field_hockey"
# suggestAlternative:"sport=ice_hockey"
err.append({'class': 9002001, 'subclass': 651933474, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sport=billard]
# *[sport=billards]
# *[sport=billiard]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billard'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billards'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'billiard'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=billiards"
# fixAdd:"sport=billiards"
err.append({'class': 9002001, 'subclass': 1522897824, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['sport','billiards']])
}})
# *[payment:credit_cards=yes]
if ('payment:credit_cards' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:credit_cards') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:mastercard=yes","payment:visa=yes")
err.append({'class': 9002013, 'subclass': 705181097, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:mastercard=yes', 'payment:visa=yes')})
# *[payment:debit_cards=yes]
if ('payment:debit_cards' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:debit_cards') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:maestro=yes","payment:girocard=yes")
err.append({'class': 9002013, 'subclass': 679215558, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:maestro=yes', 'payment:girocard=yes')})
# *[payment:electronic_purses=yes]
if ('payment:electronic_purses' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:electronic_purses') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:ep_geldkarte=yes","payment:ep_quick=yes")
err.append({'class': 9002013, 'subclass': 1440457244, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:ep_geldkarte=yes', 'payment:ep_quick=yes')})
# *[payment:cryptocurrencies=yes]
if ('payment:cryptocurrencies' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:cryptocurrencies') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.","{0.tag}","payment:bitcoin=yes","payment:litecoin=yes")
err.append({'class': 9002013, 'subclass': 1325255949, 'text': mapcss.tr('{0} is inaccurate. Use separate tags for each specific type, e.g. {1} or {2}.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), 'payment:bitcoin=yes', 'payment:litecoin=yes')})
# *[payment:ep_quick]
# *[payment:ep_cash]
# *[payment:ep_proton]
# *[payment:ep_chipknip]
if ('payment:ep_cash' in keys) or ('payment:ep_chipknip' in keys) or ('payment:ep_proton' in keys) or ('payment:ep_quick' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_quick'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_cash'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_proton'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'payment:ep_chipknip'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 332575437, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[kp][railway!=milestone]
if ('kp' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kp') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') != mapcss._value_const_capture(capture_tags, 1, 'milestone', 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"distance"
# fixChangeKey:"kp => distance"
err.append({'class': 9002001, 'subclass': 1256703107, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['distance', mapcss.tag(tags, 'kp')]]),
'-': ([
'kp'])
}})
# *[pk][railway!=milestone]
if ('pk' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pk') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') != mapcss._value_const_capture(capture_tags, 1, 'milestone', 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"distance"
# fixChangeKey:"pk => distance"
err.append({'class': 9002001, 'subclass': 1339969759, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['distance', mapcss.tag(tags, 'pk')]]),
'-': ([
'pk'])
}})
# *[kp][railway=milestone]
if ('kp' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kp') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"railway:position"
# fixChangeKey:"kp => railway:position"
err.append({'class': 9002001, 'subclass': 1667272140, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'kp')]]),
'-': ([
'kp'])
}})
# *[pk][railway=milestone]
if ('pk' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'pk') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"railway:position"
# fixChangeKey:"pk => railway:position"
err.append({'class': 9002001, 'subclass': 691355164, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'pk')]]),
'-': ([
'pk'])
}})
# *[distance][railway=milestone]
if ('distance' in keys and 'railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'distance') and mapcss._tag_capture(capture_tags, 1, tags, 'railway') == mapcss._value_capture(capture_tags, 1, 'milestone'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"railway:position"
# fixChangeKey:"distance => railway:position"
err.append({'class': 9002001, 'subclass': 113691181, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['railway:position', mapcss.tag(tags, 'distance')]]),
'-': ([
'distance'])
}})
# *[postcode]
if ('postcode' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'postcode'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"addr:postcode"
# suggestAlternative:"postal_code"
err.append({'class': 9002001, 'subclass': 1942523538, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[water=intermittent]
if ('water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'intermittent'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"intermittent=yes"
# fixAdd:"intermittent=yes"
# fixRemove:"water"
err.append({'class': 9002001, 'subclass': 813530321, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['intermittent','yes']]),
'-': ([
'water'])
}})
# *[landuse=farm]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'farm'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=farmland"
# suggestAlternative:"landuse=farmyard"
err.append({'class': 9002001, 'subclass': 1968473048, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[seamark=buoy]["seamark:type"=~/^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$/]
if ('seamark' in keys and 'seamark:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'seamark') == mapcss._value_capture(capture_tags, 0, 'buoy') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_61b0be1b), mapcss._tag_capture(capture_tags, 1, tags, 'seamark:type')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"{1.tag}"
# fixRemove:"seamark"
err.append({'class': 9002001, 'subclass': 1224401740, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'seamark'])
}})
# *[seamark=buoy]["seamark:type"!~/^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$/]
if ('seamark' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'seamark') == mapcss._value_capture(capture_tags, 0, 'buoy') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_61b0be1b, '^(buoy_cardinal|buoy_installation|buoy_isolated_danger|buoy_lateral|buoy_safe_water|buoy_special_purpose|mooring)$'), mapcss._tag_capture(capture_tags, 1, tags, 'seamark:type')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"{1.tag}"
err.append({'class': 9002001, 'subclass': 1481035998, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[landuse=conservation]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'conservation'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"boundary=protected_area"
# fixAdd:"boundary=protected_area"
# fixRemove:"landuse"
err.append({'class': 9002001, 'subclass': 824801072, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['boundary','protected_area']]),
'-': ([
'landuse'])
}})
# *[amenity=kiosk]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'kiosk'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=kiosk"
# fixChangeKey:"amenity => shop"
err.append({'class': 9002001, 'subclass': 1331930630, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=shop]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'shop'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=*"
err.append({'class': 9002001, 'subclass': 1562207150, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=fishmonger]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fishmonger'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=seafood"
# fixAdd:"shop=seafood"
err.append({'class': 9002001, 'subclass': 1376789416, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','seafood']])
}})
# *[shop=fish]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'fish'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=fishing"
# suggestAlternative:"shop=pet"
# suggestAlternative:"shop=seafood"
err.append({'class': 9002001, 'subclass': 47191734, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=betting]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'betting'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=casino"
# suggestAlternative:"amenity=gambling"
# suggestAlternative:"leisure=adult_gaming_centre"
# suggestAlternative:"leisure=amusement_arcade"
# suggestAlternative:"shop=bookmaker"
# suggestAlternative:"shop=lottery"
err.append({'class': 9002001, 'subclass': 1035501389, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=perfume]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'perfume'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=perfumery"
# fixAdd:"shop=perfumery"
err.append({'class': 9002001, 'subclass': 2075099676, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','perfumery']])
}})
# *[amenity=exercise_point]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'exercise_point'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=fitness_station"
# fixRemove:"amenity"
# fixAdd:"leisure=fitness_station"
err.append({'class': 9002001, 'subclass': 1514920202, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure','fitness_station']]),
'-': ([
'amenity'])
}})
# *[shop=auto_parts]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'auto_parts'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=car_parts"
# fixAdd:"shop=car_parts"
err.append({'class': 9002001, 'subclass': 1675828779, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','car_parts']])
}})
# *[amenity=car_repair]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'car_repair'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=car_repair"
# fixChangeKey:"amenity => shop"
err.append({'class': 9002001, 'subclass': 1681273585, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=studio][type=audio]
# *[amenity=studio][type=radio]
# *[amenity=studio][type=television]
# *[amenity=studio][type=video]
if ('amenity' in keys and 'type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'audio'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'radio'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'television'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'studio') and mapcss._tag_capture(capture_tags, 1, tags, 'type') == mapcss._value_capture(capture_tags, 1, 'video'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"studio"
# fixChangeKey:"type => studio"
err.append({'class': 9002001, 'subclass': 413401822, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['studio', mapcss.tag(tags, 'type')]]),
'-': ([
'type'])
}})
# *[power=cable_distribution_cabinet]
if ('power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') == mapcss._value_capture(capture_tags, 0, 'cable_distribution_cabinet'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=street_cabinet + street_cabinet=*"
# fixAdd:"man_made=street_cabinet"
# fixRemove:"power"
err.append({'class': 9002001, 'subclass': 1007567078, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','street_cabinet']]),
'-': ([
'power'])
}})
# *[power][location=kiosk]
if ('location' in keys and 'power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'power') and mapcss._tag_capture(capture_tags, 1, tags, 'location') == mapcss._value_capture(capture_tags, 1, 'kiosk'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{1.tag}")
# fixRemove:"location"
# fixAdd:"man_made=street_cabinet"
# fixAdd:"street_cabinet=power"
err.append({'class': 9002001, 'subclass': 182905067, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{1.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','street_cabinet'],
['street_cabinet','power']]),
'-': ([
'location'])
}})
# *[man_made=well]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'well'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=petroleum_well"
# suggestAlternative:"man_made=water_well"
err.append({'class': 9002001, 'subclass': 1740864107, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=dog_bin]
# *[amenity=dog_waste_bin]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'dog_bin'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'dog_waste_bin'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=waste_basket + waste=dog_excrement + vending=excrement_bags"
# fixAdd:"amenity=waste_basket"
# fixAdd:"vending=excrement_bags"
# fixAdd:"waste=dog_excrement"
err.append({'class': 9002001, 'subclass': 2091877281, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','waste_basket'],
['vending','excrement_bags'],
['waste','dog_excrement']])
}})
# *[amenity=artwork]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'artwork'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"tourism=artwork"
# fixRemove:"amenity"
# fixAdd:"tourism=artwork"
err.append({'class': 9002001, 'subclass': 728429076, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tourism','artwork']]),
'-': ([
'amenity'])
}})
# *[amenity=community_center]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'community_center'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=community_centre"
# fixAdd:"amenity=community_centre"
err.append({'class': 9002001, 'subclass': 690512681, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','community_centre']])
}})
# *[man_made=cut_line]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'cut_line'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=cutline"
# fixAdd:"man_made=cutline"
err.append({'class': 9002001, 'subclass': 1008752382, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','cutline']])
}})
# *[amenity=park]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'park'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=park"
# fixRemove:"amenity"
# fixAdd:"leisure=park"
err.append({'class': 9002001, 'subclass': 2085280194, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure','park']]),
'-': ([
'amenity'])
}})
# *[amenity=hotel]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hotel'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"tourism=hotel"
# fixRemove:"amenity"
# fixAdd:"tourism=hotel"
err.append({'class': 9002001, 'subclass': 1341786818, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['tourism','hotel']]),
'-': ([
'amenity'])
}})
# *[shop=window]
# *[shop=windows]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'window'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'windows'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=window_construction"
# fixAdd:"craft=window_construction"
# fixRemove:"shop"
err.append({'class': 9002001, 'subclass': 532391183, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['craft','window_construction']]),
'-': ([
'shop'])
}})
# *[amenity=education]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'education'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=college"
# suggestAlternative:"amenity=school"
# suggestAlternative:"amenity=university"
err.append({'class': 9002001, 'subclass': 796960259, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=gallery]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'gallery'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=art"
# fixAdd:"shop=art"
err.append({'class': 9002001, 'subclass': 1319611546, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','art']])
}})
# *[shop=gambling]
# *[leisure=gambling]
if ('leisure' in keys) or ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'gambling'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'gambling'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=casino"
# suggestAlternative:"amenity=gambling"
# suggestAlternative:"leisure=amusement_arcade"
# suggestAlternative:"shop=bookmaker"
# suggestAlternative:"shop=lottery"
err.append({'class': 9002001, 'subclass': 1955724853, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[office=real_estate]
# *[office=real_estate_agent]
if ('office' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'real_estate'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'real_estate_agent'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=estate_agent"
# fixAdd:"office=estate_agent"
err.append({'class': 9002001, 'subclass': 2027311706, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['office','estate_agent']])
}})
# *[shop=glass]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'glass'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=glaziery"
# suggestAlternative:"shop=glaziery"
err.append({'class': 9002001, 'subclass': 712020531, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=proposed]
# *[amenity=disused]
# *[shop=disused]
# *[highway=abandoned]
# *[historic=abandoned]
if ('amenity' in keys) or ('highway' in keys) or ('historic' in keys) or ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'proposed'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'disused'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'disused'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated. Use the {1}: key prefix instead.","{0.tag}","{0.value}")
err.append({'class': 9002001, 'subclass': 847809313, 'text': mapcss.tr('{0} is deprecated. Use the {1}: key prefix instead.', mapcss._tag_uncapture(capture_tags, '{0.tag}'), mapcss._tag_uncapture(capture_tags, '{0.value}'))})
# *[amenity=swimming_pool]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'swimming_pool'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=swimming_pool"
# fixChangeKey:"amenity => leisure"
err.append({'class': 9002001, 'subclass': 2012807801, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[amenity=sauna]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'sauna'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=sauna"
# fixChangeKey:"amenity => leisure"
err.append({'class': 9002001, 'subclass': 1450116742, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['leisure', mapcss.tag(tags, 'amenity')]]),
'-': ([
'amenity'])
}})
# *[/^[^t][^i][^g].+_[0-9]$/][!/^note_[0-9]$/][!/^description_[0-9]$/]
if True:
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, self.re_300dfa36) and not mapcss._tag_capture(capture_tags, 1, tags, self.re_3185ac6d) and not mapcss._tag_capture(capture_tags, 2, tags, self.re_6d27b157))
except mapcss.RuleAbort: pass
if match:
# group:tr("questionable key (ending with a number)")
# throwWarning:tr("{0}","{0.key}")
err.append({'class': 9002014, 'subclass': 2081989305, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[sport=skating]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'skating'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=ice_skating"
# suggestAlternative:"sport=roller_skating"
err.append({'class': 9002001, 'subclass': 170699177, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=public_building]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'public_building'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"..."
# suggestAlternative:"amenity=community_centre"
# suggestAlternative:"amenity=hospital"
# suggestAlternative:"amenity=townhall"
# suggestAlternative:"building=hospital"
# suggestAlternative:"building=public"
# suggestAlternative:"leisure=sports_centre"
# suggestAlternative:"office=government"
err.append({'class': 9002001, 'subclass': 1295642010, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[office=administrative]
if ('office' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'office') == mapcss._value_capture(capture_tags, 0, 'administrative'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=government"
# fixAdd:"office=government"
err.append({'class': 9002001, 'subclass': 213844674, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['office','government']])
}})
# *[vending=news_papers]
if ('vending' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'news_papers'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"vending=newspapers"
# fixAdd:"vending=newspapers"
err.append({'class': 9002001, 'subclass': 1133820292, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['vending','newspapers']])
}})
# *[service=drive_through]
if ('service' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'service') == mapcss._value_capture(capture_tags, 0, 'drive_through'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"service=drive-through"
# fixAdd:"service=drive-through"
err.append({'class': 9002001, 'subclass': 283545650, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['service','drive-through']])
}})
# *[noexit][noexit!=yes][noexit!=no]
if ('noexit' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') and mapcss._tag_capture(capture_tags, 1, tags, 'noexit') != mapcss._value_const_capture(capture_tags, 1, 'yes', 'yes') and mapcss._tag_capture(capture_tags, 2, tags, 'noexit') != mapcss._value_const_capture(capture_tags, 2, 'no', 'no'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("The key {0} has an uncommon value.","{1.key}")
err.append({'class': 9002017, 'subclass': 1357403556, 'text': mapcss.tr('The key {0} has an uncommon value.', mapcss._tag_uncapture(capture_tags, '{1.key}'))})
# *[name:botanical]
if ('name:botanical' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'name:botanical'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"species"
err.append({'class': 9002001, 'subclass': 1061429000, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[shop=souvenir]
# *[shop=souvenirs]
# *[shop=souveniers]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souvenir'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souvenirs'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'souveniers'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=gift"
# fixAdd:"shop=gift"
err.append({'class': 9002001, 'subclass': 1794702946, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','gift']])
}})
# *[vending=animal_food]
if ('vending' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'vending') == mapcss._value_capture(capture_tags, 0, 'animal_food'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"vending=animal_feed"
# fixAdd:"vending=animal_feed"
err.append({'class': 9002001, 'subclass': 1077411296, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['vending','animal_feed']])
}})
# *[amenity=hunting_stand][lock=yes]
# *[amenity=hunting_stand][lock=no]
if ('amenity' in keys and 'lock' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hunting_stand') and mapcss._tag_capture(capture_tags, 1, tags, 'lock') == mapcss._value_capture(capture_tags, 1, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'hunting_stand') and mapcss._tag_capture(capture_tags, 1, tags, 'lock') == mapcss._value_capture(capture_tags, 1, 'no'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{1.key}","{0.tag}")
# suggestAlternative:"lockable"
# fixChangeKey:"lock => lockable"
err.append({'class': 9002001, 'subclass': 1939599742, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{1.key}'), mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['lockable', mapcss.tag(tags, 'lock')]]),
'-': ([
'lock'])
}})
# *[amenity=advertising][!advertising]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'advertising') and not mapcss._tag_capture(capture_tags, 1, tags, 'advertising'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"advertising=*"
err.append({'class': 9002001, 'subclass': 1696784412, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=advertising][advertising]
if ('advertising' in keys and 'amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'advertising') and mapcss._tag_capture(capture_tags, 1, tags, 'advertising'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"advertising=*"
# fixRemove:"amenity"
err.append({'class': 9002001, 'subclass': 1538706366, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'-': ([
'amenity'])
}})
# *[building=true]
# *[building="*"]
# *[building=Y]
# *[building=y]
# *[building=1]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'true'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, '*'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'Y'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'y'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 1))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"building=yes"
# fixAdd:"building=yes"
err.append({'class': 9002018, 'subclass': 596818855, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','yes']])
}})
# *[building=abandoned]
# *[building=address]
# *[building=bing]
# *[building=collapsed]
# *[building=damaged]
# *[building=demolished]
# *[building=disused]
# *[building=fixme]
# *[building=occupied]
# *[building=razed]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'abandoned'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'address'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'bing'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'collapsed'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'damaged'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'demolished'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'disused'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'fixme'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'occupied'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'razed'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is not a building type.","{0.tag}")
err.append({'class': 9002001, 'subclass': 938825828, 'text': mapcss.tr('{0} is not a building type.', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[building=other]
# *[building=unclassified]
# *[building=undefined]
# *[building=unknown]
# *[building=unidentified]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'other'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unclassified'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'undefined'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unknown'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'unidentified'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is not a building type.","{0.tag}")
# fixAdd:"building=yes"
err.append({'class': 9002001, 'subclass': 48721080, 'text': mapcss.tr('{0} is not a building type.', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','yes']])
}})
# relation[water=salt]
if ('water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'salt'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=salt_pond"
# suggestAlternative:"salt=yes"
err.append({'class': 9002001, 'subclass': 1845964412, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=toilet]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'toilet'))
except mapcss.RuleAbort: pass
if match:
# group:tr("misspelled value")
# throwError:tr("{0}","{0.tag}")
# suggestAlternative:"amenity=toilets"
# fixAdd:"amenity=toilets"
err.append({'class': 9002018, 'subclass': 440018606, 'text': mapcss.tr('{0}', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','toilets']])
}})
# *[man_made=MDF]
# *[man_made=telephone_exchange]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'MDF'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'telephone_exchange'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"telecom=exchange"
# fixRemove:"man_made"
# fixAdd:"telecom=exchange"
err.append({'class': 9002001, 'subclass': 634698090, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['telecom','exchange']]),
'-': ([
'man_made'])
}})
# *[building=central_office]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'central_office'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"telecom=exchange"
# fixAdd:"building=yes"
# fixAdd:"telecom=exchange"
err.append({'class': 9002001, 'subclass': 1091970270, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','yes'],
['telecom','exchange']])
}})
# *[telecom=central_office]
if ('telecom' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'telecom') == mapcss._value_capture(capture_tags, 0, 'central_office'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"telecom=exchange"
# fixAdd:"telecom=exchange"
err.append({'class': 9002001, 'subclass': 1503278830, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['telecom','exchange']])
}})
# *[natural=waterfall]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'waterfall'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"waterway=waterfall"
# fixChangeKey:"natural => waterway"
err.append({'class': 9002001, 'subclass': 764711734, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['waterway', mapcss.tag(tags, 'natural')]]),
'-': ([
'natural'])
}})
# *[religion=unitarian]
if ('religion' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'religion') == mapcss._value_capture(capture_tags, 0, 'unitarian'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"religion=unitarian_universalist"
# fixAdd:"religion=unitarian_universalist"
err.append({'class': 9002001, 'subclass': 9227331, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['religion','unitarian_universalist']])
}})
# *[shop=shopping_centre]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'shopping_centre'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=mall"
# fixAdd:"shop=mall"
err.append({'class': 9002001, 'subclass': 1448390566, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','mall']])
}})
# *[is_in]
if ('is_in' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'is_in'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# fixRemove:"{0.key}"
err.append({'class': 9002001, 'subclass': 981454091, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
mapcss._tag_uncapture(capture_tags, '{0.key}')])
}})
# *[sport=football]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'football'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=american_football"
# suggestAlternative:"sport=australian_football"
# suggestAlternative:"sport=canadian_football"
# suggestAlternative:"sport=gaelic_games"
# suggestAlternative:"sport=rugby_league"
# suggestAlternative:"sport=rugby_union"
# suggestAlternative:"sport=soccer"
err.append({'class': 9002001, 'subclass': 73038577, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[leisure=common]
if ('leisure' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'common'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"designation=common"
# suggestAlternative:"landuse=*"
# suggestAlternative:"leisure=*"
err.append({'class': 9002001, 'subclass': 157636301, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[cuisine=vegan]
# *[cuisine=vegetarian]
if ('cuisine' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') == mapcss._value_capture(capture_tags, 0, 'vegan'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') == mapcss._value_capture(capture_tags, 0, 'vegetarian'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# suggestAlternative:concat("diet:","{0.value}","=only")
# suggestAlternative:concat("diet:","{0.value}","=yes")
# throwWarning:tr("{0} is deprecated","{0.tag}")
err.append({'class': 9002001, 'subclass': 43604574, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[kitchen_hours]
if ('kitchen_hours' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'kitchen_hours'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"opening_hours:kitchen"
# fixChangeKey:"kitchen_hours => opening_hours:kitchen"
err.append({'class': 9002001, 'subclass': 1088306802, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['opening_hours:kitchen', mapcss.tag(tags, 'kitchen_hours')]]),
'-': ([
'kitchen_hours'])
}})
# *[shop=money_transfer]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'money_transfer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=money_transfer"
# fixChangeKey:"shop => amenity"
err.append({'class': 9002001, 'subclass': 1664997936, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity', mapcss.tag(tags, 'shop')]]),
'-': ([
'shop'])
}})
# *[contact:google_plus]
if ('contact:google_plus' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'contact:google_plus'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# fixRemove:"contact:google_plus"
err.append({'class': 9002001, 'subclass': 1869461154, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'-': ([
'contact:google_plus'])
}})
# *[amenity=garages]
# *[amenity=garage]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'garages'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'garage'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# suggestAlternative:concat("building=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=parking + parking=garage_boxes"
# suggestAlternative:"landuse=garages"
err.append({'class': 9002001, 'subclass': 863228118, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=winery]
# *[amenity=winery]
if ('amenity' in keys) or ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'winery'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'winery'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=winery"
# suggestAlternative:"shop=wine"
err.append({'class': 9002001, 'subclass': 1773574987, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[amenity=youth_centre]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'youth_centre'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"amenity=community_centre + community_centre=youth_centre"
# fixAdd:"amenity=community_centre"
# fixAdd:"community_centre=youth_centre"
err.append({'class': 9002001, 'subclass': 1284929085, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['amenity','community_centre'],
['community_centre','youth_centre']])
}})
# *[building:type][building=yes]
# *[building:type][!building]
if ('building' in keys and 'building:type' in keys) or ('building:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and mapcss._tag_capture(capture_tags, 1, tags, 'building') == mapcss._value_capture(capture_tags, 1, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and not mapcss._tag_capture(capture_tags, 1, tags, 'building'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"building"
# fixChangeKey:"building:type => building"
err.append({'class': 9002001, 'subclass': 1927794430, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building', mapcss.tag(tags, 'building:type')]]),
'-': ([
'building:type'])
}})
# *[building:type][building][building!=yes]
if ('building' in keys and 'building:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:type') and mapcss._tag_capture(capture_tags, 1, tags, 'building') and mapcss._tag_capture(capture_tags, 2, tags, 'building') != mapcss._value_const_capture(capture_tags, 2, 'yes', 'yes'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"building"
err.append({'class': 9002001, 'subclass': 1133239698, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[escalator]
if ('escalator' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'escalator'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"highway=steps + conveying=*"
err.append({'class': 9002001, 'subclass': 967271828, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[fenced]
if ('fenced' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'fenced'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"barrier=fence"
err.append({'class': 9002001, 'subclass': 1141285220, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[historic_name][!old_name]
if ('historic_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic_name') and not mapcss._tag_capture(capture_tags, 1, tags, 'old_name'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"old_name"
# fixChangeKey:"historic_name => old_name"
err.append({'class': 9002001, 'subclass': 1034538127, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['old_name', mapcss.tag(tags, 'historic_name')]]),
'-': ([
'historic_name'])
}})
# *[historic_name][old_name]
if ('historic_name' in keys and 'old_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'historic_name') and mapcss._tag_capture(capture_tags, 1, tags, 'old_name'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"old_name"
err.append({'class': 9002001, 'subclass': 30762614, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[landuse=field]
if ('landuse' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'landuse') == mapcss._value_capture(capture_tags, 0, 'field'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=farmland"
# fixAdd:"landuse=farmland"
err.append({'class': 9002001, 'subclass': 426261497, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['landuse','farmland']])
}})
# *[leisure=beach]
if ('leisure' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'beach'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=beach_resort"
# suggestAlternative:"natural=beach"
err.append({'class': 9002001, 'subclass': 1767286055, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[leisure=club]
if ('leisure' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'club'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"club=*"
err.append({'class': 9002001, 'subclass': 1282397509, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[leisure=video_arcade]
if ('leisure' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'leisure') == mapcss._value_capture(capture_tags, 0, 'video_arcade'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"leisure=adult_gaming_centre"
# suggestAlternative:"leisure=amusement_arcade"
err.append({'class': 9002001, 'subclass': 1463909830, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[man_made=jetty]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'jetty'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=pier"
# fixAdd:"man_made=pier"
err.append({'class': 9002001, 'subclass': 192707176, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','pier']])
}})
# *[man_made=village_pump]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'village_pump'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=water_well"
# fixAdd:"man_made=water_well"
err.append({'class': 9002001, 'subclass': 423232686, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['man_made','water_well']])
}})
# *[man_made=water_tank]
if ('man_made' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'man_made') == mapcss._value_capture(capture_tags, 0, 'water_tank'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"man_made=storage_tank + content=water"
# fixAdd:"content=water"
# fixAdd:"man_made=storage_tank"
err.append({'class': 9002001, 'subclass': 563629665, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['content','water'],
['man_made','storage_tank']])
}})
# *[natural=moor]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'moor'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"landuse=meadow + meadow=agricultural"
# suggestAlternative:"natural=fell"
# suggestAlternative:"natural=grassland"
# suggestAlternative:"natural=heath"
# suggestAlternative:"natural=scrub"
# suggestAlternative:"natural=tundra"
# suggestAlternative:"natural=wetland"
err.append({'class': 9002001, 'subclass': 374637717, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[noexit=no][!fixme]
if ('noexit' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') == mapcss._value_capture(capture_tags, 0, 'no') and not mapcss._tag_capture(capture_tags, 1, tags, 'fixme'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"fixme=continue"
# fixAdd:"fixme=continue"
# fixRemove:"noexit"
err.append({'class': 9002001, 'subclass': 647435126, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['fixme','continue']]),
'-': ([
'noexit'])
}})
# *[noexit=no][fixme]
if ('fixme' in keys and 'noexit' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'noexit') == mapcss._value_capture(capture_tags, 0, 'no') and mapcss._tag_capture(capture_tags, 1, tags, 'fixme'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"fixme=continue"
err.append({'class': 9002001, 'subclass': 881828009, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=dive]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'dive'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=scuba_diving"
# fixAdd:"shop=scuba_diving"
err.append({'class': 9002001, 'subclass': 1582968978, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','scuba_diving']])
}})
# *[shop=furnace]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'furnace'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"craft=plumber"
# suggestAlternative:"shop=fireplace"
err.append({'class': 9002001, 'subclass': 1155821104, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[sport=paragliding]
if ('sport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sport') == mapcss._value_capture(capture_tags, 0, 'paragliding'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"sport=free_flying"
# fixAdd:"sport=free_flying"
err.append({'class': 9002001, 'subclass': 1531788430, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['sport','free_flying']])
}})
# *[tourism=bed_and_breakfast]
if ('tourism' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tourism') == mapcss._value_capture(capture_tags, 0, 'bed_and_breakfast'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"tourism=guest_house + guest_house=bed_and_breakfast"
# fixAdd:"guest_house=bed_and_breakfast"
# fixAdd:"tourism=guest_house"
err.append({'class': 9002001, 'subclass': 954237438, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['guest_house','bed_and_breakfast'],
['tourism','guest_house']])
}})
# *[diaper=yes]
# *[diaper=no]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper => changing_table"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 1957125311, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table', mapcss.tag(tags, 'diaper')]]),
'-': ([
'diaper'])
}})
# *[diaper][diaper=~/^[1-9][0-9]*$/]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_0f294fdf), mapcss._tag_capture(capture_tags, 1, tags, 'diaper')))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table=yes + changing_table:count=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixAdd:"changing_table=yes"
# fixChangeKey:"diaper => changing_table:count"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 2105051472, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table','yes'],
['changing_table:count', mapcss.tag(tags, 'diaper')]]),
'-': ([
'diaper'])
}})
# *[diaper=room]
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper') == mapcss._value_capture(capture_tags, 0, 'room'))
except mapcss.RuleAbort: pass
if match:
# setdiaper_checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table=dedicated_room"
# suggestAlternative:"changing_table=room"
set_diaper_checked = True
err.append({'class': 9002001, 'subclass': 883202329, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[diaper]!.diaper_checked
if ('diaper' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_diaper_checked and mapcss._tag_capture(capture_tags, 0, tags, 'diaper'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"changing_table"
err.append({'class': 9002001, 'subclass': 693675339, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[diaper:male=yes]
if ('diaper:male' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:male') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=male_toilet"
# fixAdd:"changing_table:location=male_toilet"
# fixRemove:"diaper:male"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 799035479, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','male_toilet']]),
'-': ([
'diaper:male'])
}})
# *[diaper:female=yes]
if ('diaper:female' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:female') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=female_toilet"
# fixAdd:"changing_table:location=female_toilet"
# fixRemove:"diaper:female"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1450901137, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','female_toilet']]),
'-': ([
'diaper:female'])
}})
# *[diaper:unisex=yes]
if ('diaper:unisex' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:unisex') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"changing_table:location=unisex_toilet"
# fixAdd:"changing_table:location=unisex_toilet"
# fixRemove:"diaper:unisex"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1460378712, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:location','unisex_toilet']]),
'-': ([
'diaper:unisex'])
}})
# *[diaper:wheelchair=yes]
# *[diaper:wheelchair=no]
if ('diaper:wheelchair' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:wheelchair') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:wheelchair') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table:wheelchair=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper:wheelchair => changing_table:wheelchair"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 1951967281, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:wheelchair', mapcss.tag(tags, 'diaper:wheelchair')]]),
'-': ([
'diaper:wheelchair'])
}})
# *[diaper:fee=yes]
# *[diaper:fee=no]
if ('diaper:fee' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:fee') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'diaper:fee') == mapcss._value_capture(capture_tags, 0, 'no'))
except mapcss.RuleAbort: pass
if match:
# setdiaper___checked
# group:tr("deprecated tagging")
# suggestAlternative:concat("changing_table:fee=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"diaper:fee => changing_table:fee"
set_diaper___checked = True
err.append({'class': 9002001, 'subclass': 2008573526, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['changing_table:fee', mapcss.tag(tags, 'diaper:fee')]]),
'-': ([
'diaper:fee'])
}})
# *[/^diaper:/]!.diaper___checked
if True:
match = False
if not match:
capture_tags = {}
try: match = (not set_diaper___checked and mapcss._tag_capture(capture_tags, 0, tags, self.re_6029fe03))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","diaper:*")
# suggestAlternative:"changing_table:*"
err.append({'class': 9002001, 'subclass': 26578864, 'text': mapcss.tr('{0} is deprecated', 'diaper:*')})
# *[changing_table][changing_table!~/^(yes|no|limited)$/]
if ('changing_table' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'changing_table') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_787405b1, '^(yes|no|limited)$'), mapcss._tag_capture(capture_tags, 1, tags, 'changing_table')))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("wrong value: {0}","{0.tag}")
# suggestAlternative:"changing_table=limited"
# suggestAlternative:"changing_table=no"
# suggestAlternative:"changing_table=yes"
err.append({'class': 9002019, 'subclass': 1965225408, 'text': mapcss.tr('wrong value: {0}', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[roof:shape=half_hipped]
if ('roof:shape' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'roof:shape') == mapcss._value_capture(capture_tags, 0, 'half_hipped'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"roof:shape=half-hipped"
# fixAdd:"roof:shape=half-hipped"
err.append({'class': 9002001, 'subclass': 1548347123, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['roof:shape','half-hipped']])
}})
# *[bridge_name]
if ('bridge_name' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'bridge_name'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"bridge:name"
# fixChangeKey:"bridge_name => bridge:name"
err.append({'class': 9002001, 'subclass': 80069399, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['bridge:name', mapcss.tag(tags, 'bridge_name')]]),
'-': ([
'bridge_name'])
}})
# *[access=public]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'public'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"access=yes"
# fixAdd:"access=yes"
err.append({'class': 9002001, 'subclass': 1115157097, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['access','yes']])
}})
# *[crossing=island]
if ('crossing' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'crossing') == mapcss._value_capture(capture_tags, 0, 'island'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"crossing:island=yes"
# fixRemove:"crossing"
# fixAdd:"crossing:island=yes"
err.append({'class': 9002001, 'subclass': 1512561318, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['crossing:island','yes']]),
'-': ([
'crossing'])
}})
# *[recycling:metal]
if ('recycling:metal' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'recycling:metal'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"recycling:scrap_metal"
# fixChangeKey:"recycling:metal => recycling:scrap_metal"
err.append({'class': 9002001, 'subclass': 474491272, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['recycling:scrap_metal', mapcss.tag(tags, 'recycling:metal')]]),
'-': ([
'recycling:metal'])
}})
# *[shop=dog_grooming]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'dog_grooming'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=pet_grooming"
# fixAdd:"shop=pet_grooming"
err.append({'class': 9002001, 'subclass': 1073412885, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','pet_grooming']])
}})
# *[tower:type=anchor]
# *[tower:type=suspension]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'anchor'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'suspension'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# suggestAlternative:concat("line_attachment=","{0.value}")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# fixChangeKey:"tower:type => line_attachment"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 180380605, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_attachment', mapcss.tag(tags, 'tower:type')]]),
'-': ([
'tower:type'])
}})
# *[tower:type=branch][branch:type=split]
# *[tower:type=branch][branch:type=loop]
if ('branch:type' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'split'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'loop'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=split"
# fixRemove:"branch:type"
# fixAdd:"line_management=split"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 362350862, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','split']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=branch][!branch:type]
# *[tower:type=branch][branch:type=tap]
if ('branch:type' in keys and 'tower:type' in keys) or ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and not mapcss._tag_capture(capture_tags, 1, tags, 'branch:type'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'tap'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=branch"
# fixRemove:"branch:type"
# fixAdd:"line_management=branch"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 476423517, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','branch']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=branch][branch:type=cross]
if ('branch:type' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'branch') and mapcss._tag_capture(capture_tags, 1, tags, 'branch:type') == mapcss._value_capture(capture_tags, 1, 'cross'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=cross"
# fixRemove:"branch:type"
# fixAdd:"line_management=cross"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 2103059531, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','cross']]),
'-': ([
'branch:type',
'tower:type'])
}})
# *[tower:type=termination]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'termination'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=termination"
# fixAdd:"line_management=termination"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 232235847, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','termination']]),
'-': ([
'tower:type'])
}})
# *[tower:type=transition]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'transition'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"location:transition=yes"
# fixAdd:"location:transition=yes"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1124904944, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['location:transition','yes']]),
'-': ([
'tower:type'])
}})
# *[tower:type=transposing]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'transposing'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"line_management=transpose"
# fixAdd:"line_management=transpose"
# fixRemove:"tower:type"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1795169098, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['line_management','transpose']]),
'-': ([
'tower:type'])
}})
# *[tower:type=crossing]
if ('tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') == mapcss._value_capture(capture_tags, 0, 'crossing'))
except mapcss.RuleAbort: pass
if match:
# setpower_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"height=* + design=*"
set_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 1301565974, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[tower:type][power][power=~/^(tower|pole|insulator|portal|terminal)$/]!.power_tower_type_warning
if ('power' in keys and 'tower:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_power_tower_type_warning and mapcss._tag_capture(capture_tags, 0, tags, 'tower:type') and mapcss._tag_capture(capture_tags, 1, tags, 'power') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 2, self.re_24dfeb95), mapcss._tag_capture(capture_tags, 2, tags, 'power')))
except mapcss.RuleAbort: pass
if match:
# setgeneric_power_tower_type_warning
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"design"
# suggestAlternative:"line_attachment"
# suggestAlternative:"line_management"
# suggestAlternative:"structure"
set_generic_power_tower_type_warning = True
err.append({'class': 9002001, 'subclass': 2020421267, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}'))})
# *[pole:type][power][power=~/^(tower|pole|insulator|portal|terminal)$/]!.power_pole_type_warning!.generic_power_tower_type_warning
if ('pole:type' in keys and 'power' in keys):
match = False
if not match:
capture_tags = {}
try: match = (not set_power_pole_type_warning and not set_generic_power_tower_type_warning and mapcss._tag_capture(capture_tags, 0, tags, 'pole:type') and mapcss._tag_capture(capture_tags, 1, tags, 'power') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 2, self.re_24dfeb95), mapcss._tag_capture(capture_tags, 2, tags, 'power')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated for {1}","{0.key}","{1.tag}")
# suggestAlternative:"line_attachment"
# suggestAlternative:"line_management"
err.append({'class': 9002001, 'subclass': 1513543887, 'text': mapcss.tr('{0} is deprecated for {1}', mapcss._tag_uncapture(capture_tags, '{0.key}'), mapcss._tag_uncapture(capture_tags, '{1.tag}'))})
# *[sloped_curb=yes][!kerb]
# *[sloped_curb=both][!kerb]
if ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'yes') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'both') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"kerb=lowered"
# fixAdd:"kerb=lowered"
# fixRemove:"sloped_curb"
err.append({'class': 9002001, 'subclass': 1906002413, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['kerb','lowered']]),
'-': ([
'sloped_curb'])
}})
# *[sloped_curb=no][!kerb]
if ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') == mapcss._value_capture(capture_tags, 0, 'no') and not mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"kerb=yes"
# fixAdd:"kerb=yes"
# fixRemove:"sloped_curb"
err.append({'class': 9002001, 'subclass': 893727015, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['kerb','yes']]),
'-': ([
'sloped_curb'])
}})
# *[sloped_curb][sloped_curb!~/^(yes|both|no)$/][!kerb]
# *[sloped_curb][kerb]
if ('kerb' in keys and 'sloped_curb' in keys) or ('sloped_curb' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_01eb1711, '^(yes|both|no)$'), mapcss._tag_capture(capture_tags, 1, tags, 'sloped_curb')) and not mapcss._tag_capture(capture_tags, 2, tags, 'kerb'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'sloped_curb') and mapcss._tag_capture(capture_tags, 1, tags, 'kerb'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"kerb=*"
err.append({'class': 9002001, 'subclass': 1682376745, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}'))})
# *[unnamed=yes]
if ('unnamed' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'unnamed') == mapcss._value_capture(capture_tags, 0, 'yes'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"noname=yes"
# fixChangeKey:"unnamed => noname"
err.append({'class': 9002001, 'subclass': 1901447020, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['noname', mapcss.tag(tags, 'unnamed')]]),
'-': ([
'unnamed'])
}})
# *[building:height]
if ('building:height' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:height'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"height"
# fixChangeKey:"building:height => height"
err.append({'class': 9002001, 'subclass': 1328174745, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['height', mapcss.tag(tags, 'building:height')]]),
'-': ([
'building:height'])
}})
# *[building:min_height]
if ('building:min_height' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building:min_height'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"min_height"
# fixChangeKey:"building:min_height => min_height"
err.append({'class': 9002001, 'subclass': 1042683921, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['min_height', mapcss.tag(tags, 'building:min_height')]]),
'-': ([
'building:min_height'])
}})
# *[car][amenity=charging_station]
if ('amenity' in keys and 'car' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'car') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') == mapcss._value_capture(capture_tags, 1, 'charging_station'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"motorcar"
# fixChangeKey:"car => motorcar"
err.append({'class': 9002001, 'subclass': 1165117414, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['motorcar', mapcss.tag(tags, 'car')]]),
'-': ([
'car'])
}})
# *[navigationaid=approach_light]
# *[navigationaid="ALS (Approach lighting system)"]
if ('navigationaid' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'navigationaid') == mapcss._value_capture(capture_tags, 0, 'approach_light'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'navigationaid') == mapcss._value_capture(capture_tags, 0, 'ALS (Approach lighting system)'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"navigationaid=als"
# fixAdd:"navigationaid=als"
err.append({'class': 9002001, 'subclass': 1577817081, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['navigationaid','als']])
}})
# *[water=riverbank][!natural]
if ('water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'riverbank') and not mapcss._tag_capture(capture_tags, 1, tags, 'natural'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=water + water=river"
# fixAdd:"natural=water"
# fixAdd:"water=river"
err.append({'class': 9002001, 'subclass': 186872153, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','water'],
['water','river']])
}})
# *[water=riverbank][natural]
if ('natural' in keys and 'water' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'water') == mapcss._value_capture(capture_tags, 0, 'riverbank') and mapcss._tag_capture(capture_tags, 1, tags, 'natural'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=water + water=river"
err.append({'class': 9002001, 'subclass': 630806094, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[shop=lamps]
if ('shop' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'shop') == mapcss._value_capture(capture_tags, 0, 'lamps'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"shop=lighting"
# fixAdd:"shop=lighting"
err.append({'class': 9002001, 'subclass': 746886011, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['shop','lighting']])
}})
# *[access=customer]
if ('access' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'access') == mapcss._value_capture(capture_tags, 0, 'customer'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"access=customers"
# fixAdd:"access=customers"
err.append({'class': 9002001, 'subclass': 1040065637, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['access','customers']])
}})
# *[addr:inclusion=estimated]
if ('addr:inclusion' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'addr:inclusion') == mapcss._value_capture(capture_tags, 0, 'estimated'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"addr:inclusion=estimate"
# fixAdd:"addr:inclusion=estimate"
err.append({'class': 9002001, 'subclass': 1002643753, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['addr:inclusion','estimate']])
}})
# *[building=apartment]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'apartment'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"building=apartments"
# fixAdd:"building=apartments"
err.append({'class': 9002001, 'subclass': 1384168519, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['building','apartments']])
}})
# *[generator:type=solar_photovoltaic_panels]
if ('generator:type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'generator:type') == mapcss._value_capture(capture_tags, 0, 'solar_photovoltaic_panels'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"generator:type=solar_photovoltaic_panel"
# fixAdd:"generator:type=solar_photovoltaic_panel"
err.append({'class': 9002001, 'subclass': 1146719875, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['generator:type','solar_photovoltaic_panel']])
}})
# *[building=part]
if ('building' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'building') == mapcss._value_capture(capture_tags, 0, 'part'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"building:part=yes"
err.append({'class': 9002001, 'subclass': 455695847, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}'))})
# *[natural=sink_hole]
if ('natural' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'natural') == mapcss._value_capture(capture_tags, 0, 'sink_hole'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"natural=sinkhole"
# fixAdd:"natural=sinkhole"
err.append({'class': 9002001, 'subclass': 1283355945, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['natural','sinkhole']])
}})
# *[climbing:grade:UIAA:min]
if ('climbing:grade:UIAA:min' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:min'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:min"
# fixChangeKey:"climbing:grade:UIAA:min => climbing:grade:uiaa:min"
err.append({'class': 9002001, 'subclass': 1408052420, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:min', mapcss.tag(tags, 'climbing:grade:UIAA:min')]]),
'-': ([
'climbing:grade:UIAA:min'])
}})
# *[climbing:grade:UIAA:max]
if ('climbing:grade:UIAA:max' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:max'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:max"
# fixChangeKey:"climbing:grade:UIAA:max => climbing:grade:uiaa:max"
err.append({'class': 9002001, 'subclass': 1866245426, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:max', mapcss.tag(tags, 'climbing:grade:UIAA:max')]]),
'-': ([
'climbing:grade:UIAA:max'])
}})
# *[climbing:grade:UIAA:mean]
if ('climbing:grade:UIAA:mean' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA:mean'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa:mean"
# fixChangeKey:"climbing:grade:UIAA:mean => climbing:grade:uiaa:mean"
err.append({'class': 9002001, 'subclass': 1022648087, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa:mean', mapcss.tag(tags, 'climbing:grade:UIAA:mean')]]),
'-': ([
'climbing:grade:UIAA:mean'])
}})
# *[climbing:grade:UIAA]
if ('climbing:grade:UIAA' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'climbing:grade:UIAA'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"climbing:grade:uiaa"
# fixChangeKey:"climbing:grade:UIAA => climbing:grade:uiaa"
err.append({'class': 9002001, 'subclass': 1007893519, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['climbing:grade:uiaa', mapcss.tag(tags, 'climbing:grade:UIAA')]]),
'-': ([
'climbing:grade:UIAA'])
}})
# *[cuisine][cuisine=~/^(?i)(bbq)$/]
if ('cuisine' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'cuisine') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_2f881233), mapcss._tag_capture(capture_tags, 1, tags, 'cuisine')))
except mapcss.RuleAbort: pass
if match:
# setbbq_autofix
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"cuisine=barbecue"
# fixAdd:"cuisine=barbecue"
set_bbq_autofix = True
err.append({'class': 9002001, 'subclass': 1943338875, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['cuisine','barbecue']])
}})
# *[cuisine=~/(?i)(;bbq|bbq;)/][cuisine!~/(?i)(_bbq)/]
if ('cuisine' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss.regexp_test(mapcss._value_capture(capture_tags, 0, self.re_340a2b31), mapcss._tag_capture(capture_tags, 0, tags, 'cuisine')) and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_7d409ed5, '(?i)(_bbq)'), mapcss._tag_capture(capture_tags, 1, tags, 'cuisine')))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","cuisine=bbq")
# suggestAlternative:"cuisine=barbecue"
err.append({'class': 9002001, 'subclass': 1958782130, 'text': mapcss.tr('{0} is deprecated', 'cuisine=bbq')})
# *[Fixme]
if ('Fixme' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'Fixme'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.key}")
# suggestAlternative:"fixme"
# fixChangeKey:"Fixme => fixme"
err.append({'class': 9002001, 'subclass': 592643943, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.key}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['fixme', mapcss.tag(tags, 'Fixme')]]),
'-': ([
'Fixme'])
}})
# *[amenity=embassy]
if ('amenity' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'amenity') == mapcss._value_capture(capture_tags, 0, 'embassy'))
except mapcss.RuleAbort: pass
if match:
# group:tr("deprecated tagging")
# throwWarning:tr("{0} is deprecated","{0.tag}")
# suggestAlternative:"office=diplomatic + diplomatic=embassy"
# fixChangeKey:"amenity => diplomatic"
# fixAdd:"office=diplomatic"
err.append({'class': 9002001, 'subclass': 1751915206, 'text': mapcss.tr('{0} is deprecated', mapcss._tag_uncapture(capture_tags, '{0.tag}')), 'allow_fix_override': True, 'fix': {
'+': dict([
['diplomatic', mapcss.tag(tags, 'amenity')],
['office','diplomatic']]),
'-': ([
'amenity'])
}})
return err
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
    """Auto-generated conformance tests for the Josm_deprecated checker.

    Each assertion feeds a minimal tag dictionary into the plugin's
    node/way/relation entry points and verifies, by (class, subclass) id,
    that the expected deprecation warning is or is not emitted.  The
    numeric subclass ids are hashes derived from the originating MapCSS
    rules; do not edit them by hand.
    """
    def test(self):
        # Build the plugin with a minimal fake config chain; the plugin
        # only reads father.config.options during init().
        n = Josm_deprecated(None)
        class _config:
            options = {"country": None, "language": None}
        class father:
            config = _config()
        n.father = father()
        n.init(None)
        data = {'id': 0, 'lat': 0, 'lon': 0}
        # --- node rules: deprecated keys, FIXME-like names, short keys ---
        self.check_err(n.node(data, {'day_on': '0-12'}), expected={'class': 9002001, 'subclass': 294264920})
        self.check_err(n.node(data, {'name': 'FIXME'}), expected={'class': 9002005, 'subclass': 642340557})
        self.check_err(n.node(data, {'name': 'Fixme'}), expected={'class': 9002005, 'subclass': 642340557})
        self.check_err(n.node(data, {'name': 'fixme'}), expected={'class': 9002005, 'subclass': 642340557})
        self.check_not_err(n.node(data, {'name': 'valid name'}), expected={'class': 9002005, 'subclass': 642340557})
        self.check_err(n.node(data, {'f': 'b'}), expected={'class': 9002012, 'subclass': 79709106})
        self.check_err(n.node(data, {'fo': 'bar'}), expected={'class': 9002012, 'subclass': 79709106})
        self.check_not_err(n.node(data, {'kp': '5'}), expected={'class': 9002012, 'subclass': 79709106})
        self.check_not_err(n.node(data, {'pk': '7'}), expected={'class': 9002012, 'subclass': 79709106})
        # --- emergency_access_point phone/emergency_telephone_code rules ---
        self.check_not_err(n.node(data, {'emergency_telephone_code': '456', 'highway': 'emergency_access_point'}), expected={'class': 9002001, 'subclass': 1339208019})
        self.check_not_err(n.node(data, {'emergency_telephone_code': '456', 'highway': 'emergency_access_point', 'phone': '123'}), expected={'class': 9002001, 'subclass': 1339208019})
        self.check_err(n.node(data, {'highway': 'emergency_access_point', 'phone': '123'}), expected={'class': 9002001, 'subclass': 1339208019})
        self.check_not_err(n.node(data, {'phone': '123'}), expected={'class': 9002001, 'subclass': 1339208019})
        self.check_not_err(n.node(data, {'emergency_telephone_code': '123', 'highway': 'emergency_access_point'}), expected={'class': 9002001, 'subclass': 342466099})
        self.check_err(n.node(data, {'emergency_telephone_code': '123', 'highway': 'emergency_access_point', 'phone': '123'}), expected={'class': 9002001, 'subclass': 342466099})
        self.check_not_err(n.node(data, {'highway': 'emergency_access_point', 'phone': '123'}), expected={'class': 9002001, 'subclass': 342466099})
        self.check_not_err(n.node(data, {'emergency_telephone_code': '123', 'highway': 'emergency_access_point'}), expected={'class': 9002001, 'subclass': 663070970})
        self.check_not_err(n.node(data, {'emergency_telephone_code': '123', 'highway': 'emergency_access_point', 'phone': '123'}), expected={'class': 9002001, 'subclass': 663070970})
        self.check_not_err(n.node(data, {'highway': 'emergency_access_point', 'phone': '123'}), expected={'class': 9002001, 'subclass': 663070970})
        # --- way rules: deprecated values and key/value combinations ---
        self.check_not_err(n.way(data, {'barrier': 'fence'}, [0]), expected={'class': 9002001, 'subclass': 1107799632})
        self.check_err(n.way(data, {'barrier': 'wire_fence'}, [0]), expected={'class': 9002001, 'subclass': 1107799632})
        self.check_err(n.way(data, {'access': 'designated'}, [0]), expected={'class': 9002002, 'subclass': 2057594338})
        self.check_err(n.way(data, {'access': 'official'}, [0]), expected={'class': 9002003, 'subclass': 1909133836})
        self.check_err(n.way(data, {'fixme': 'yes'}, [0]), expected={'class': 9002004, 'subclass': 136657482})
        self.check_err(n.way(data, {'natural': 'land'}, [0]), expected={'class': 9002001, 'subclass': 94558529})
        # --- color vs colour spelling rules ---
        self.check_not_err(n.way(data, {'color': 'red', 'colour': 'red'}, [0]), expected={'class': 9002001, 'subclass': 1850270072})
        self.check_err(n.way(data, {'color': 'red'}, [0]), expected={'class': 9002001, 'subclass': 1850270072})
        self.check_not_err(n.way(data, {'color': 'red', 'colour': 'green'}, [0]), expected={'class': 9002001, 'subclass': 1825345743})
        self.check_err(n.way(data, {'color': 'red', 'colour': 'red'}, [0]), expected={'class': 9002001, 'subclass': 1825345743})
        self.check_err(n.way(data, {'color': 'red', 'colour': 'green'}, [0]), expected={'class': 9002001, 'subclass': 1064658218})
        self.check_not_err(n.way(data, {'color': 'red', 'colour': 'red'}, [0]), expected={'class': 9002001, 'subclass': 1064658218})
        self.check_not_err(n.way(data, {'color': 'red'}, [0]), expected={'class': 9002001, 'subclass': 1632389707})
        self.check_err(n.way(data, {'cycleway:surface:color': 'grey'}, [0]), expected={'class': 9002001, 'subclass': 1632389707})
        self.check_not_err(n.way(data, {'roof:color': 'grey'}, [0]), expected={'class': 9002001, 'subclass': 1632389707})
        self.check_err(n.way(data, {'color:back': 'grey'}, [0]), expected={'class': 9002001, 'subclass': 1390370717})
        self.check_not_err(n.way(data, {'color': 'red'}, [0]), expected={'class': 9002001, 'subclass': 1390370717})
        # --- short 'to' key (allowed on ferry routes only) ---
        self.check_not_err(n.way(data, {'route': 'ferry', 'to': 'Zuidschermer;Akersloot'}, [0]), expected={'class': 9002012, 'subclass': 1765060211})
        self.check_err(n.way(data, {'to': 'bar'}, [0]), expected={'class': 9002012, 'subclass': 1765060211})
        # --- name_1 style numbered-suffix keys ---
        self.check_not_err(n.way(data, {'description_3': 'foo'}, [0]), expected={'class': 9002014, 'subclass': 2081989305})
        self.check_err(n.way(data, {'name_1': 'foo'}, [0]), expected={'class': 9002014, 'subclass': 2081989305})
        self.check_not_err(n.way(data, {'note_2': 'foo'}, [0]), expected={'class': 9002014, 'subclass': 2081989305})
        self.check_not_err(n.way(data, {'tiger:name_base_1': 'bar'}, [0]), expected={'class': 9002014, 'subclass': 2081989305})
        # --- building:type deprecation ---
        self.check_not_err(n.way(data, {'building': 'supermarket', 'building:type': 'church'}, [0]), expected={'class': 9002001, 'subclass': 1927794430})
        self.check_err(n.way(data, {'building': 'yes', 'building:type': 'church'}, [0]), expected={'class': 9002001, 'subclass': 1927794430})
        self.check_err(n.way(data, {'building:type': 'church'}, [0]), expected={'class': 9002001, 'subclass': 1927794430})
        self.check_err(n.way(data, {'building': 'supermarket', 'building:type': 'church'}, [0]), expected={'class': 9002001, 'subclass': 1133239698})
        self.check_not_err(n.way(data, {'building': 'yes', 'building:type': 'church'}, [0]), expected={'class': 9002001, 'subclass': 1133239698})
        self.check_not_err(n.way(data, {'building:type': 'church'}, [0]), expected={'class': 9002001, 'subclass': 1133239698})
        # --- cuisine=bbq deprecation (single value vs inside a list) ---
        self.check_err(n.way(data, {'cuisine': 'BBQ'}, [0]), expected={'class': 9002001, 'subclass': 1943338875})
        self.check_err(n.way(data, {'cuisine': 'bbq'}, [0]), expected={'class': 9002001, 'subclass': 1943338875})
        self.check_not_err(n.way(data, {'cuisine': 'bbq;pizza'}, [0]), expected={'class': 9002001, 'subclass': 1943338875})
        self.check_not_err(n.way(data, {'cuisine': 'korean_bbq'}, [0]), expected={'class': 9002001, 'subclass': 1943338875})
        self.check_not_err(n.way(data, {'cuisine': 'korean_bbq;bbq'}, [0]), expected={'class': 9002001, 'subclass': 1943338875})
        self.check_not_err(n.way(data, {'cuisine': 'pasta;bbq;pizza'}, [0]), expected={'class': 9002001, 'subclass': 1943338875})
        self.check_not_err(n.way(data, {'cuisine': 'pizza;Bbq'}, [0]), expected={'class': 9002001, 'subclass': 1943338875})
        self.check_not_err(n.way(data, {'cuisine': 'pizza;bbq'}, [0]), expected={'class': 9002001, 'subclass': 1943338875})
        self.check_not_err(n.way(data, {'cuisine': 'BBQ'}, [0]), expected={'class': 9002001, 'subclass': 1958782130})
        self.check_not_err(n.way(data, {'cuisine': 'bbq'}, [0]), expected={'class': 9002001, 'subclass': 1958782130})
        self.check_err(n.way(data, {'cuisine': 'bbq;pizza'}, [0]), expected={'class': 9002001, 'subclass': 1958782130})
        self.check_not_err(n.way(data, {'cuisine': 'korean_bbq'}, [0]), expected={'class': 9002001, 'subclass': 1958782130})
        self.check_not_err(n.way(data, {'cuisine': 'korean_bbq;bbq'}, [0]), expected={'class': 9002001, 'subclass': 1958782130})
        self.check_err(n.way(data, {'cuisine': 'pasta;bbq;pizza'}, [0]), expected={'class': 9002001, 'subclass': 1958782130})
        self.check_err(n.way(data, {'cuisine': 'pizza;Bbq'}, [0]), expected={'class': 9002001, 'subclass': 1958782130})
        self.check_err(n.way(data, {'cuisine': 'pizza;bbq'}, [0]), expected={'class': 9002001, 'subclass': 1958782130})
        # --- Fixme key capitalization rule (only exact 'Fixme' matches) ---
        self.check_not_err(n.way(data, {'FIXME': 'foo'}, [0]), expected={'class': 9002001, 'subclass': 592643943})
        self.check_err(n.way(data, {'Fixme': 'foo'}, [0]), expected={'class': 9002001, 'subclass': 592643943})
        self.check_not_err(n.way(data, {'fixme': 'foo'}, [0]), expected={'class': 9002001, 'subclass': 592643943})
        # --- relation rules ---
        self.check_err(n.relation(data, {'fo': 'bar'}, []), expected={'class': 9002012, 'subclass': 518970721})
        self.check_not_err(n.relation(data, {'to': 'Berlin'}, []), expected={'class': 9002012, 'subclass': 518970721})
| tkasp/osmose-backend | plugins/Josm_deprecated.py | Python | gpl-3.0 | 787,125 | [
"CASINO"
] | 4c5c0592a5b90b5115d6881136864d78c407f6043d52bbf1fdefd75b4d5f5899 |
#!/usr/bin/env python
'''
Anh Nguyen <anh.ng8@gmail.com>
2016
'''
from __future__ import print_function
import os, sys
os.environ['GLOG_minloglevel'] = '2' # suprress Caffe verbose prints
import settings
sys.path.insert(0, settings.caffe_root)
import caffe
import numpy as np
from numpy.linalg import norm
import scipy.misc, scipy.io
from scipy.ndimage.filters import gaussian_laplace as laplace_filter
import util
from style import StyleTransfer
class Sampler(object):
    """Activation-maximization sampler through a generator network
    (plug-and-play generative networks style).

    A latent code ``h`` is iteratively updated so that the decoded image
    ``G(h)``: (a) is likely under a DAE-based image prior, (b) activates a
    chosen unit/class of a condition network, and, optionally, (c) matches
    a masked input image, its edges, and its style/content statistics.

    NOTE(review): ``forward_backward_from_x_to_condition``, ``get_label``
    and ``print_progress`` are referenced by :meth:`sampling` but are not
    defined in this class -- presumably provided by a subclass or patched
    in elsewhere; confirm before instantiating ``Sampler`` directly.
    """

    def load_image(self, shape, path, output_dir='', save=True):
        """Load an image from ``path`` and return it as a BGR, NCHW
        float32 array resized to the spatial size implied by ``shape``.

        shape      -- target blob shape, e.g. (1, 3, 227, 227)
        path       -- image file to read
        output_dir -- directory whose ``samples/`` subfolder receives a
                      copy of the loaded image when ``save`` is True
        """
        images = np.zeros(shape, dtype='float32')
        image_size = shape[2:]
        in_image = scipy.misc.imread(path)
        in_image = scipy.misc.imresize(in_image, (image_size[0], image_size[1]))
        images[0] = np.transpose(in_image, (2, 0, 1))  # HWC -> CHW, e.g. (3, 227, 227)
        data = images[:, ::-1]  # flip the channel axis: RGB -> BGR
        if save:
            name = "%s/samples/%s.jpg" % (output_dir, 'start')
            util.save_image(data, name)
        return data

    def get_code(self, encoder, data, layer, output_dir='', mask=None):
        """Push ``data`` (BGR, NCHW) through an encoder net (e.g. AlexNet)
        and return a copy of its activations at ``layer``.

        Side effect: ``data`` is mean-subtracted *in place*.
        NOTE(review): the passed-in ``encoder`` is only used to read the
        input shape; a fresh net is re-created from ``settings`` below --
        confirm this re-initialization is intentional.
        """
        # Input spatial size of the encoder, e.g. (227, 227).
        image_size = encoder.blobs['data'].shape[2:]

        # Subtract the ImageNet mean (stored as (256, 256, 3), already BGR),
        # center-cropped to the encoder input size.
        image_mean = scipy.io.loadmat('misc/ilsvrc_2012_mean.mat')['image_mean']
        topleft = self.compute_topleft(image_size, image_mean.shape[:2])
        image_mean = image_mean[topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]]
        data -= np.expand_dims(np.transpose(image_mean, (2, 0, 1)), 0)

        # Re-initialize the encoder from settings (see NOTE above).
        encoder = caffe.Net(settings.encoder_definition, settings.encoder_weights, caffe.TEST)

        # Extract the features, optionally masking the input first.
        if mask is not None:
            encoder.forward(data=data*mask)
        else:
            encoder.forward(data=data)
        features = encoder.blobs[layer].data.copy()
        return features

    def backward_from_x_to_h(self, generator, diff, start, end):
        """Backpropagate gradient ``diff`` from the image layer (``end``)
        back to the latent layer (``start``) of the generator and return a
        copy of the latent gradient.
        """
        dst = generator.blobs[end]
        dst.diff[...] = diff
        generator.backward(start=end)
        g = generator.blobs[start].diff.copy()
        dst.diff.fill(0.)  # reset objective after each step
        return g

    def compute_topleft(self, input_size, output_size):
        """Return integer ``(top, left)`` offsets that center-crop an
        ``output_size`` image (the generator's fixed 256x256 output) down
        to ``input_size`` (the consuming net's input, which varies).
        """
        assert len(input_size) == 2, "input_size must be (h, w)"
        assert len(output_size) == 2, "output_size must be (h, w)"
        # Floor division: under Python 3, true division would yield floats
        # and break the array slicing that consumes these offsets.
        topleft = ((output_size[0] - input_size[0]) // 2, (output_size[1] - input_size[1]) // 2)
        return topleft

    def h_autoencoder_grad(self, h, encoder, decoder, gen_out_layer, topleft, mask=None, input_image=None):
        """Gradient of the DAE prior: ``E(G(h)) - h`` {Alain & Bengio, 2014}.

        Naming is "upside down" on purpose: this auto-encoder for ``h``
        goes h -> x -> h, so ``encoder`` here is the image *generator* G
        (h -> x) and ``decoder`` is the image *encoder* E (x -> h).
        """
        encoder.forward(feat=h)  # G(h); the returned dict is not needed
        x0 = encoder.blobs[gen_out_layer].data.copy()  # 256x256 image

        # Crop from 256x256 down to the decoder input, e.g. 227x227.
        image_size = decoder.blobs['data'].shape  # (1, 3, 227, 227)
        cropped_x0 = x0[:, :, topleft[0]:topleft[0]+image_size[2], topleft[1]:topleft[1]+image_size[3]]
        if mask is not None:
            # Pin the masked region to the real input image.
            cropped_x0 = mask * input_image + (1 - mask) * cropped_x0

        # Push the cropped image through the encoder E and read its code.
        decoder.forward(data=cropped_x0)
        code = decoder.blobs['fc6'].data
        g = code - h
        return g

    def get_edge_gradient(self, input_image, generated_image, edge_detector):
        """Return the gradient (w.r.t. the generated image) of the L2
        distance between the Laplacian edge maps of the input and the
        generated image.
        """
        # Edge maps of both images via the edge-detector net.
        input_edge = edge_detector.forward(data=input_image)['laplace'].copy()
        generated_edge = edge_detector.forward(data=generated_image)['laplace'].copy()
        # d/dx of the squared L2 norm is (up to a constant) the difference.
        diff = input_edge - generated_edge
        # Backprop the difference through the detector to the image layer.
        dst = edge_detector.blobs['laplace']
        dst.diff[...] = diff
        edge_detector.backward(start='laplace')
        g = edge_detector.blobs['data'].diff.copy()
        dst.diff.fill(0.)  # reset objective after each step
        return g

    def sampling(self, condition_net, image_encoder, image_net, image_generator, edge_detector,
                 gen_in_layer, gen_out_layer, start_code, content_layer,
                 n_iters, lr, lr_end, threshold,
                 layer, conditions, mask=None, input_image=None,
                 epsilon1=1, epsilon2=1, epsilon3=1e-10,
                 mask_epsilon=1e-6, edge_epsilon=1e-8,
                 style_epsilon=1e-8, content_epsilon=1e-8,
                 output_dir=None, reset_every=0, save_every=1):
        """Run the sampling chain and return ``(last_sample, list_samples)``.

        Each iteration combines three gradient terms on the latent code h:
        epsilon1 * d log p(h)/dh (DAE prior), the condition/edge/style
        gradient backpropagated through the generator, and epsilon3
        Gaussian noise.  Samples whose condition probability exceeds
        ``threshold`` are collected every ``save_every`` iterations; the
        condition advances every ``n_iters`` iterations until exhausted.
        """
        # Blob shapes of the nets involved.
        image_shape = condition_net.blobs['data'].data.shape
        generator_output_shape = image_generator.blobs[gen_out_layer].data.shape
        encoder_input_shape = image_encoder.blobs['data'].data.shape

        # Spatial sizes, used to crop the generator's 256x256 output down
        # to each consuming net's input size.
        image_size = util.get_image_size(image_shape)
        generator_output_size = util.get_image_size(generator_output_shape)
        encoder_input_size = util.get_image_size(encoder_input_shape)

        # Top-left offsets for the condition net and for the DAE encoder.
        topleft = self.compute_topleft(image_size, generator_output_size)
        topleft_DAE = self.compute_topleft(encoder_input_size, generator_output_size)

        src = image_generator.blobs[gen_in_layer]  # generator's input feature layer
        # The latent layer and the initial code must agree in shape.
        assert src.data.shape == start_code.shape

        use_style_transfer = style_epsilon != 0 or content_epsilon != 0
        # Set up style transfer against the input image, when requested.
        if input_image is not None and use_style_transfer:
            style_transfer = StyleTransfer(image_net, style_weight=style_epsilon, content_weight=content_epsilon)
            style_transfer.init_image(input_image)
        elif input_image is None:
            # TODO setup loading the vector components
            raise NotImplementedError('input image must not be None')
        elif not use_style_transfer:
            print('not using style transfer')

        # Best sample so far.
        last_xx = np.zeros(image_shape)
        # Highest probability so far; -inf so the first sample always wins.
        # (Replaces -sys.maxint, which does not exist under Python 3.)
        last_prob = -float('inf')

        h = start_code.copy()

        condition_idx = 0
        list_samples = []
        i = 0

        while True:
            # Linearly anneal the step size from lr to lr_end over n_iters.
            step_size = lr + ((lr_end - lr) * i) / n_iters
            condition = conditions[condition_idx]  # Select a class

            # 1. epsilon1 term: d log(p(h))/dh per DAE {Alain & Bengio 2014}
            d_prior = self.h_autoencoder_grad(h=h, encoder=image_generator, decoder=image_encoder, gen_out_layer=gen_out_layer, topleft=topleft_DAE, mask=mask, input_image=input_image)

            # 2. epsilon2 term: push the code through the generator to get x.
            # NOTE(review): "feat" is hard-coded while gen_in_layer is used
            # elsewhere -- confirm they always name the same layer.
            image_generator.blobs["feat"].data[:] = h
            generated = image_generator.forward()
            x = generated[gen_out_layer].copy()  # 256x256

            # Crop from 256x256 to the condition net size (e.g. 227x227).
            cropped_x_nomask = x[:, :, topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]]
            if mask is not None:
                cropped_x = mask * input_image + (1 - mask) * cropped_x_nomask
            else:
                cropped_x = cropped_x_nomask

            # Forward the image through the condition net up to `layer` and
            # backprop to the image layer to get a gradient image.
            d_condition_x, prob, info = self.forward_backward_from_x_to_condition(net=condition_net, end=layer, image=cropped_x, condition=condition)

            # Outside the mask only: the masked region is pinned to input.
            if mask is not None:
                generated_image = (1 - mask) * d_condition_x
            else:
                generated_image = d_condition_x

            # Mix in the edge-matching gradient against the input image.
            d_edge = self.get_edge_gradient(input_image, generated_image, edge_detector)
            d_condition_x = epsilon2 * generated_image + edge_epsilon * d_edge
            if use_style_transfer:
                d_condition_x += style_transfer.get_gradient(generated_image)
            if mask is not None:
                # Pull the masked region toward the input image.
                d_condition_x += mask_epsilon * (mask) * (input_image - cropped_x_nomask)

            # Re-embed the gradient into the 256x256 frame and backprop it
            # through the generator: d_condition = d log(p(y|h))/dh.
            d_condition_x256 = np.zeros_like(x)
            d_condition_x256[:, :, topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]] = d_condition_x.copy()
            d_condition = self.backward_from_x_to_h(generator=image_generator, diff=d_condition_x256, start=gen_in_layer, end=gen_out_layer)

            # 3. epsilon3 term: Gaussian noise drives the sampling chain.
            noise = np.zeros_like(h)
            if epsilon3 > 0:
                noise = np.random.normal(0, epsilon3, h.shape)

            d_h = epsilon1 * d_prior
            d_h += d_condition
            d_h += noise

            # Normalized step; keep the code within a realistic range.
            h += step_size / np.abs(d_h).mean() * d_h
            h = np.clip(h, a_min=0, a_max=30)

            # Reset the code every N iters (diversity in long chains).
            if reset_every > 0 and i % reset_every == 0 and i > 0:
                h = np.random.normal(0, 1, h.shape)
                # Experimental: randomly re-draw epsilon1 as well.
                epsilon1 = np.random.uniform(low=1e-6, high=1e-2)

            # Remember the latest sample and its probability.
            last_xx = cropped_x.copy()
            last_prob = prob

            # Collect samples above threshold, every `save_every` iters.
            if save_every > 0 and i % save_every == 0 and prob > threshold:
                name = "%s/samples/%05d.jpg" % (output_dir, i)
                label = self.get_label(condition)
                if mask is not None:
                    image = last_xx * mask + (1 - mask) * input_image
                else:
                    image = last_xx
                list_samples.append((image.copy(), name, label))

            # Stop if the gradient vanishes.
            if norm(d_h) == 0:
                print(" d_h is 0")
                break

            # Advance to the next condition every n_iters iterations.
            if i > 0 and i % n_iters == 0:
                condition_idx += 1
                if condition_idx == len(conditions):
                    break

            i += 1  # Next iter

        # Report and return the last sample plus everything collected.
        print("-------------------------")
        print("Last sample: prob [%s] " % last_prob)

        return last_xx, list_samples
| philkuz/ppgn | sampler.py | Python | mit | 12,786 | [
"Gaussian"
] | 92c91d125ed9722c3edb23d826ef09085db40f4bebcada1681aea3a81957b480 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.